var/home/core/zuul-output/logs/kubelet.log
Nov 29 04:11:12 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 29 04:11:12 crc restorecon[4582]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 29 04:11:12 crc restorecon[4582]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc 
restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 29 04:11:12 crc 
restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 
04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 29 04:11:12 crc 
restorecon[4582]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 
04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 
04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 29 04:11:12 crc restorecon[4582]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 29 04:11:12 crc restorecon[4582]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
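The restorecon pass above leaves every path that already carries an admin-customized container_file_t label (including its per-pod MCS category pair, e.g. s0:c7,c13) untouched, and only relabels kubenswrapper itself to kubelet_exec_t. A minimal sketch of how to read the same label restorecon compares against, assuming a Linux host with SELinux extended attributes (the path is only an illustrative example taken from the entries above):

import os

def selinux_label(path: str) -> str:
    # The security.selinux xattr stores the label as a NUL-terminated
    # byte string, e.g. b"system_u:object_r:container_file_t:s0:c7,c13\x00".
    raw = os.getxattr(path, "security.selinux")
    return raw.rstrip(b"\x00").decode()

# Example: print the label of a kubelet state directory.
print(selinux_label("/var/lib/kubelet"))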
Nov 29 04:11:13 crc kubenswrapper[4631]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 29 04:11:13 crc kubenswrapper[4631]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 29 04:11:13 crc kubenswrapper[4631]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 29 04:11:13 crc kubenswrapper[4631]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 29 04:11:13 crc kubenswrapper[4631]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 29 04:11:13 crc kubenswrapper[4631]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.043014 4631 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
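Each deprecation warning above points at the same fix: move the flag into the file passed via --config. A hedged sketch of the equivalent KubeletConfiguration stanzas, written out from Python; the field names follow kubelet.config.k8s.io/v1beta1, but the endpoint, taint, and reservation values are illustrative assumptions, not values read from this log:

# Write a kubelet config file covering the deprecated flags logged above.
# All values are placeholders; adjust them for the actual node.
CONFIG = """\
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
containerRuntimeEndpoint: unix:///var/run/crio/crio.sock  # was --container-runtime-endpoint
volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec  # was --volume-plugin-dir
registerWithTaints:  # was --register-with-taints
- key: node-role.kubernetes.io/master
  effect: NoSchedule
systemReserved:  # was --system-reserved
  cpu: 500m
  memory: 1Gi
evictionHard:  # replaces --minimum-container-ttl-duration per the warning above
  memory.available: 100Mi
"""

with open("kubelet-config.yaml", "w") as f:
    f.write(CONFIG)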
feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046068 4631 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046073 4631 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046078 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046082 4631 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046087 4631 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046092 4631 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046097 4631 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046102 4631 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046106 4631 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046111 4631 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046116 4631 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046121 4631 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046125 4631 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046132 4631 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046138 4631 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046144 4631 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046149 4631 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046154 4631 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046160 4631 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046165 4631 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046170 4631 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046175 4631 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046180 4631 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046185 4631 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046190 4631 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046195 4631 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046200 4631 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046204 4631 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046209 4631 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046214 4631 feature_gate.go:330] unrecognized feature gate: Example Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046219 4631 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046223 4631 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046228 4631 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046233 4631 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046237 4631 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046242 4631 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046247 4631 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046252 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046257 4631 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046262 4631 feature_gate.go:330] unrecognized feature gate: 
MultiArchInstallAzure Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046268 4631 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046275 4631 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046281 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046286 4631 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046291 4631 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046297 4631 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046302 4631 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046307 4631 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046312 4631 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.046317 4631 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046623 4631 flags.go:64] FLAG: --address="0.0.0.0" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046639 4631 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046648 4631 flags.go:64] FLAG: --anonymous-auth="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046655 4631 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046662 4631 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046668 4631 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046675 4631 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046682 4631 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046690 4631 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046696 4631 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046702 4631 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046707 4631 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046713 4631 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046720 4631 flags.go:64] FLAG: --cgroup-root="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046725 4631 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046731 4631 flags.go:64] FLAG: --client-ca-file="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046736 4631 flags.go:64] FLAG: 
--cloud-config="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046742 4631 flags.go:64] FLAG: --cloud-provider="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046747 4631 flags.go:64] FLAG: --cluster-dns="[]" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046754 4631 flags.go:64] FLAG: --cluster-domain="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046759 4631 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046765 4631 flags.go:64] FLAG: --config-dir="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046771 4631 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046777 4631 flags.go:64] FLAG: --container-log-max-files="5" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046784 4631 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046790 4631 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046795 4631 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046801 4631 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046807 4631 flags.go:64] FLAG: --contention-profiling="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046812 4631 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046818 4631 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046824 4631 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046830 4631 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046837 4631 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046844 4631 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046850 4631 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046856 4631 flags.go:64] FLAG: --enable-load-reader="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046862 4631 flags.go:64] FLAG: --enable-server="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046868 4631 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046874 4631 flags.go:64] FLAG: --event-burst="100" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046880 4631 flags.go:64] FLAG: --event-qps="50" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046885 4631 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046891 4631 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046897 4631 flags.go:64] FLAG: --eviction-hard="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046904 4631 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046910 4631 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046915 4631 flags.go:64] FLAG: 
--eviction-pressure-transition-period="5m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046921 4631 flags.go:64] FLAG: --eviction-soft="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046927 4631 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046932 4631 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046938 4631 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046943 4631 flags.go:64] FLAG: --experimental-mounter-path="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046949 4631 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046955 4631 flags.go:64] FLAG: --fail-swap-on="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046961 4631 flags.go:64] FLAG: --feature-gates="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046968 4631 flags.go:64] FLAG: --file-check-frequency="20s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046974 4631 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046980 4631 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046986 4631 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046991 4631 flags.go:64] FLAG: --healthz-port="10248" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.046997 4631 flags.go:64] FLAG: --help="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047003 4631 flags.go:64] FLAG: --hostname-override="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047008 4631 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047014 4631 flags.go:64] FLAG: --http-check-frequency="20s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047020 4631 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047025 4631 flags.go:64] FLAG: --image-credential-provider-config="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047031 4631 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047036 4631 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047042 4631 flags.go:64] FLAG: --image-service-endpoint="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047048 4631 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047054 4631 flags.go:64] FLAG: --kube-api-burst="100" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047059 4631 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047065 4631 flags.go:64] FLAG: --kube-api-qps="50" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047070 4631 flags.go:64] FLAG: --kube-reserved="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047076 4631 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047081 4631 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047087 4631 flags.go:64] FLAG: 
--kubelet-cgroups="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047092 4631 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047098 4631 flags.go:64] FLAG: --lock-file="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047103 4631 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047110 4631 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047115 4631 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047123 4631 flags.go:64] FLAG: --log-json-split-stream="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047129 4631 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047134 4631 flags.go:64] FLAG: --log-text-split-stream="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047140 4631 flags.go:64] FLAG: --logging-format="text" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047145 4631 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047151 4631 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047157 4631 flags.go:64] FLAG: --manifest-url="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047162 4631 flags.go:64] FLAG: --manifest-url-header="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047174 4631 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047180 4631 flags.go:64] FLAG: --max-open-files="1000000" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047187 4631 flags.go:64] FLAG: --max-pods="110" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047192 4631 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047198 4631 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047204 4631 flags.go:64] FLAG: --memory-manager-policy="None" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047210 4631 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047215 4631 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047221 4631 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047227 4631 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047239 4631 flags.go:64] FLAG: --node-status-max-images="50" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047245 4631 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047251 4631 flags.go:64] FLAG: --oom-score-adj="-999" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047257 4631 flags.go:64] FLAG: --pod-cidr="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047262 4631 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 29 04:11:13 crc 
kubenswrapper[4631]: I1129 04:11:13.047271 4631 flags.go:64] FLAG: --pod-manifest-path="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047276 4631 flags.go:64] FLAG: --pod-max-pids="-1" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047282 4631 flags.go:64] FLAG: --pods-per-core="0" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047288 4631 flags.go:64] FLAG: --port="10250" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047293 4631 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047299 4631 flags.go:64] FLAG: --provider-id="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047304 4631 flags.go:64] FLAG: --qos-reserved="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047310 4631 flags.go:64] FLAG: --read-only-port="10255" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047316 4631 flags.go:64] FLAG: --register-node="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047321 4631 flags.go:64] FLAG: --register-schedulable="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047327 4631 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047364 4631 flags.go:64] FLAG: --registry-burst="10" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047372 4631 flags.go:64] FLAG: --registry-qps="5" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047378 4631 flags.go:64] FLAG: --reserved-cpus="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047384 4631 flags.go:64] FLAG: --reserved-memory="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047391 4631 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047397 4631 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047403 4631 flags.go:64] FLAG: --rotate-certificates="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047409 4631 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047414 4631 flags.go:64] FLAG: --runonce="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047420 4631 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047426 4631 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047431 4631 flags.go:64] FLAG: --seccomp-default="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047437 4631 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.047442 4631 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.049400 4631 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.049543 4631 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050033 4631 flags.go:64] FLAG: --storage-driver-password="root" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050045 4631 flags.go:64] FLAG: --storage-driver-secure="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050053 4631 flags.go:64] FLAG: --storage-driver-table="stats" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050061 4631 flags.go:64] FLAG: 
--storage-driver-user="root" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050067 4631 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050076 4631 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050083 4631 flags.go:64] FLAG: --system-cgroups="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050090 4631 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050107 4631 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050114 4631 flags.go:64] FLAG: --tls-cert-file="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050120 4631 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050130 4631 flags.go:64] FLAG: --tls-min-version="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050137 4631 flags.go:64] FLAG: --tls-private-key-file="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050143 4631 flags.go:64] FLAG: --topology-manager-policy="none" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050149 4631 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050155 4631 flags.go:64] FLAG: --topology-manager-scope="container" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050161 4631 flags.go:64] FLAG: --v="2" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050172 4631 flags.go:64] FLAG: --version="false" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050180 4631 flags.go:64] FLAG: --vmodule="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050187 4631 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050194 4631 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050381 4631 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050388 4631 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050394 4631 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050401 4631 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050408 4631 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
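[editor's note] The long run of "flags.go:64] FLAG: ..." entries above is the kubelet echoing every flag with its effective value, one entry per flag, value quoted. When reconstructing a node's configuration from such a log, the pairs can be recovered mechanically; a small sketch, assuming exactly the FLAG: --name="value" layout seen here:

    import re

    # Recover flag/value pairs from kubelet "flags.go:64] FLAG: ..." entries.
    # Assumes the layout seen in this log: FLAG: --name="value"
    FLAG_RE = re.compile(r'FLAG: (--[\w-]+)="([^"]*)"')

    def parse_flags(log_text):
        return dict(FLAG_RE.findall(log_text))

    sample = 'I1129 04:11:13.047227 4631 flags.go:64] FLAG: --node-ip="192.168.126.11"'
    print(parse_flags(sample))  # {'--node-ip': '192.168.126.11'}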
Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050416 4631 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050422 4631 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050427 4631 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050433 4631 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050439 4631 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050445 4631 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050451 4631 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050456 4631 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050462 4631 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050468 4631 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050473 4631 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050478 4631 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050484 4631 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050489 4631 feature_gate.go:330] unrecognized feature gate: Example Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050494 4631 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050500 4631 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050507 4631 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050514 4631 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050520 4631 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050525 4631 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050531 4631 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050537 4631 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050545 4631 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050550 4631 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050556 4631 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050560 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050567 4631 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050573 4631 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050578 4631 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050583 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050588 4631 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050593 4631 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050598 4631 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050604 4631 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050608 4631 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050613 4631 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050618 4631 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050623 4631 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050628 4631 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050633 4631 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050638 4631 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050643 4631 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 
04:11:13.050647 4631 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050652 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050658 4631 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050665 4631 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050676 4631 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050681 4631 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050686 4631 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050692 4631 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050697 4631 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050702 4631 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050707 4631 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050712 4631 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050717 4631 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050722 4631 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050728 4631 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050733 4631 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050738 4631 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050743 4631 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050748 4631 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050752 4631 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050757 4631 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050762 4631 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050767 4631 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.050771 4631 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.050780 4631 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true 
MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.061819 4631 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.061847 4631 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061927 4631 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061935 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061942 4631 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061948 4631 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061953 4631 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061958 4631 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061963 4631 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061968 4631 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061974 4631 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061980 4631 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
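[editor's note] The feature_gate.go:386 entry above shows the net effect of gate parsing: every name the component does not recognize produces a feature_gate.go:330 warning and is dropped, setting a deprecated gate (KMSv1) or a GA gate (ValidatingAdmissionPolicy, CloudDualStackNodeIPs, DisableKubeletCloudCredentialProviders) produces a :351/:353 warning but sticks, and the surviving map is printed. A minimal sketch of that behavior, inferred only from these entries rather than from the real k8s.io/component-base implementation:

    import warnings

    # Known gates and their maturity, as implied by this log's warnings.
    KNOWN = {
        "KMSv1": "deprecated",
        "ValidatingAdmissionPolicy": "ga",
        "CloudDualStackNodeIPs": "ga",
        "DisableKubeletCloudCredentialProviders": "ga",
    }

    def set_gates(requested):
        effective = {}
        for name, value in requested.items():
            maturity = KNOWN.get(name)
            if maturity is None:
                warnings.warn(f"unrecognized feature gate: {name}")
                continue  # unrecognized gates are dropped, not fatal
            if value:
                warnings.warn(f"Setting {maturity} feature gate {name}=true. "
                              "It will be removed in a future release.")
            effective[name] = value
        return effective

    print(set_gates({"KMSv1": True, "GatewayAPI": True}))  # GatewayAPI dropped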
Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061987 4631 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061993 4631 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.061999 4631 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062005 4631 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062010 4631 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062017 4631 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062023 4631 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062028 4631 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062034 4631 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062039 4631 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062044 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062049 4631 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062054 4631 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062060 4631 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062065 4631 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062071 4631 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062076 4631 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062081 4631 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062086 4631 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062091 4631 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062096 4631 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062103 4631 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062108 4631 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062113 4631 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062118 4631 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062123 4631 feature_gate.go:330] unrecognized feature gate: 
AzureWorkloadIdentity Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062128 4631 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062133 4631 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062138 4631 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062143 4631 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062148 4631 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062153 4631 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062159 4631 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062164 4631 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062169 4631 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062396 4631 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062408 4631 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062414 4631 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062421 4631 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062427 4631 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062433 4631 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062440 4631 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062446 4631 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062451 4631 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062456 4631 feature_gate.go:330] unrecognized feature gate: Example Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062461 4631 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062468 4631 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062474 4631 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062480 4631 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062486 4631 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062491 4631 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062496 4631 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062502 4631 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062508 4631 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062514 4631 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062519 4631 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062524 4631 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062529 4631 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062534 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062539 4631 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062544 4631 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.062553 4631 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062704 4631 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062711 4631 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062717 4631 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062722 4631 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062727 4631 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062732 4631 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062737 4631 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062743 4631 
feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062750 4631 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062755 4631 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062760 4631 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062766 4631 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062771 4631 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062777 4631 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062784 4631 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062790 4631 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062795 4631 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062801 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062806 4631 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062811 4631 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062817 4631 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062822 4631 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062827 4631 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062832 4631 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062837 4631 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062844 4631 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062851 4631 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062857 4631 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062862 4631 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062868 4631 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062873 4631 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062878 4631 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062883 4631 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062888 4631 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062893 4631 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062898 4631 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062903 4631 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062907 4631 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062912 4631 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062917 4631 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062923 4631 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062928 4631 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062933 4631 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062938 4631 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062943 4631 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062947 4631 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062952 4631 feature_gate.go:330] unrecognized feature gate: Example Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062957 4631 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062962 4631 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062967 4631 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062972 4631 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062977 4631 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 29 
04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062982 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062987 4631 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062992 4631 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.062997 4631 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063002 4631 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063007 4631 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063011 4631 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063017 4631 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063021 4631 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063027 4631 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063032 4631 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063038 4631 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063043 4631 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063048 4631 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063056 4631 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
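[editor's note] After the final gate pass, the certificate_manager.go entries below record the client-certificate manager scheduling its next rotation: the cert expires 2026-02-24 05:52:08 UTC, the rotation deadline is 2025-12-10 21:36:47 UTC (the kubelet picks this deadline at a jittered point inside the cert's validity window, so treat it as given), and the kubelet sleeps for the difference, 281h25m34s. A quick check of that arithmetic:

    from datetime import datetime, timezone

    # Values reported by certificate_manager.go in the entries below.
    now = datetime(2025, 11, 29, 4, 11, 13, 67497, tzinfo=timezone.utc)
    deadline = datetime(2025, 12, 10, 21, 36, 47, 419220, tzinfo=timezone.utc)

    wait = deadline - now
    hours, rem = divmod(int(wait.total_seconds()), 3600)
    print(f"{hours}h{rem // 60}m{rem % 60}s")  # 281h25m34s, matching the log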
Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063063 4631 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063069 4631 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063075 4631 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.063080 4631 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.063087 4631 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.063236 4631 server.go:940] "Client rotation is on, will bootstrap in background" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.066176 4631 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.066269 4631 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.066962 4631 server.go:997] "Starting client certificate rotation" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.066982 4631 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.067416 4631 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-10 21:36:47.4192209 +0000 UTC Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.067497 4631 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 281h25m34.351740971s for next certificate rotation Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.072440 4631 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.074302 4631 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.082110 4631 log.go:25] "Validated CRI v1 runtime API" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.097944 4631 log.go:25] "Validated CRI v1 image API" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.099667 4631 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.102592 4631 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-29-04-05-57-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.102625 4631 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 
blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}] Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.117259 4631 manager.go:217] Machine: {Timestamp:2025-11-29 04:11:13.116189851 +0000 UTC m=+0.180693385 CPUVendorID:AuthenticAMD NumCores:8 NumPhysicalCores:1 NumSockets:8 CpuFrequency:2800000 MemoryCapacity:25199480832 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:06b81a2a-46c7-4ed7-b163-1df3ee4c2427 BootID:26b847b6-4c18-4480-bfc6-a52029f99f22 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:3076108 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:12599738368 Type:vfs Inodes:3076108 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:5039898624 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:12599742464 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:2519945216 Type:vfs Inodes:615221 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:429496729600 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:74:4c:00 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:74:4c:00 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:62:2b:ff Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:c1:9b:1c Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:5e:6c:12 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:84:1a:f8 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:aa:bc:a2:2e:a3:7e Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:d6:fe:7c:8d:9a:99 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:25199480832 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 
Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.117527 4631 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.117666 4631 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.117987 4631 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.118134 4631 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.118363 4631 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 29 04:11:13 crc 
kubenswrapper[4631]: I1129 04:11:13.118540 4631 topology_manager.go:138] "Creating topology manager with none policy" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.118549 4631 container_manager_linux.go:303] "Creating device plugin manager" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.118759 4631 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.118782 4631 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.119026 4631 state_mem.go:36] "Initialized new in-memory state store" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.119098 4631 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.119833 4631 kubelet.go:418] "Attempting to sync node with API server" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.119852 4631 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.119870 4631 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.119883 4631 kubelet.go:324] "Adding apiserver pod source" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.119895 4631 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.122072 4631 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.122417 4631 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". 
Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123128 4631 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123695 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123716 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123723 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123730 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123741 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123748 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123755 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123768 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123777 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123785 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123794 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123802 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.123981 4631 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.124075 4631 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.124199 4631 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.190:6443: connect: connection refused" logger="UnhandledError" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.124348 4631 server.go:1280] "Started kubelet" Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.124601 4631 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.124692 4631 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.190:6443: 
connect: connection refused" logger="UnhandledError" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.124706 4631 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.125517 4631 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.124704 4631 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 29 04:11:13 crc systemd[1]: Started Kubernetes Kubelet. Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.126744 4631 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.126763 4631 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.127322 4631 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.127314 4631 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 13:13:35.320760105 +0000 UTC Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.138589 4631 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 297h2m22.182181527s for next certificate rotation Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.131527 4631 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.131505 4631 server.go:460] "Adding debug handlers to kubelet server" Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.139603 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="200ms" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.140057 4631 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.140079 4631 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.139523 4631 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.190:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c5ee0af034a96 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-29 04:11:13.124305558 +0000 UTC m=+0.188809072,LastTimestamp:2025-11-29 04:11:13.124305558 +0000 UTC m=+0.188809072,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.140786 4631 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 29 04:11:13 crc 
kubenswrapper[4631]: W1129 04:11:13.141641 4631 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.141699 4631 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.190:6443: connect: connection refused" logger="UnhandledError" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.143153 4631 factory.go:153] Registering CRI-O factory Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.143204 4631 factory.go:221] Registration of the crio container factory successfully Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.143300 4631 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.143315 4631 factory.go:55] Registering systemd factory Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.143327 4631 factory.go:221] Registration of the systemd container factory successfully Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.143386 4631 factory.go:103] Registering Raw factory Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.143409 4631 manager.go:1196] Started watching for new ooms in manager Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.145210 4631 manager.go:319] Starting recovery of all containers Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155597 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155732 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155750 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155763 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155777 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155791 4631 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155803 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155816 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155833 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155846 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155860 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155877 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155887 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155902 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155914 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155925 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155939 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155951 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155963 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155975 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155986 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.155997 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156008 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156021 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156034 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156046 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156061 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156074 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156118 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156132 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156144 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156156 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156169 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156180 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156191 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156203 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156215 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156230 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156242 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156255 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156269 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156280 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156291 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156301 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156312 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156341 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156354 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156366 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156379 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156393 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156405 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156417 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156436 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156451 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156465 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156479 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156491 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156507 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156520 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156532 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156546 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156558 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156570 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156582 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156596 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156609 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156622 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156634 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156647 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156659 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156672 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156684 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" 
volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156697 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156709 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156721 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156734 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156746 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156758 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156770 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156781 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156797 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156813 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156825 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" 
volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156838 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156863 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156874 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156885 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156896 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156907 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156920 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156932 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156944 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156956 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156968 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156980 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.156992 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157006 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157017 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157030 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157649 4631 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157676 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157690 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157706 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157720 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157733 4631 reconstruct.go:130] "Volume is marked 
as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157757 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157772 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157786 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157800 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157813 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157826 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157841 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157856 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157868 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157880 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157894 4631 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157907 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157919 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157931 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157942 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157955 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157969 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157982 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.157995 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158008 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158021 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158035 4631 reconstruct.go:130] "Volume is marked as uncertain and added into 
the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158049 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158063 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158075 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158086 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158098 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158128 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158140 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158152 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158164 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158175 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158188 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158200 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158212 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158224 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158236 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158248 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158261 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158274 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158285 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158296 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158307 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158318 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158352 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158366 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158377 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158391 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158403 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158415 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158429 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158440 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158453 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158466 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158479 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158494 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158506 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158518 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158531 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158545 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158558 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158569 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158581 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158594 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158607 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158619 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158631 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158643 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158657 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158668 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158681 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158693 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158706 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158718 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158730 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158743 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158757 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158768 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158781 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158792 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158806 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158819 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158831 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158842 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158855 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158866 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158877 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158891 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158901 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158913 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158923 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158932 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158940 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158948 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158956 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158968 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158980 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.158992 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.159003 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" 
volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.159014 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.159026 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.159039 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.159052 4631 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.159069 4631 reconstruct.go:97] "Volume reconstruction finished" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.159076 4631 reconciler.go:26] "Reconciler: start to sync state" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.167863 4631 manager.go:324] Recovery completed Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.179785 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.182827 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.182873 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.182891 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.184270 4631 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.184308 4631 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.184357 4631 state_mem.go:36] "Initialized new in-memory state store" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.197289 4631 policy_none.go:49] "None policy: Start" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.201101 4631 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.201161 4631 state_mem.go:35] "Initializing new in-memory state store" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.212204 4631 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.215145 4631 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.215190 4631 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.215215 4631 kubelet.go:2335] "Starting kubelet main sync loop" Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.215272 4631 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.216056 4631 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.216105 4631 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.190:6443: connect: connection refused" logger="UnhandledError" Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.239207 4631 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.253762 4631 manager.go:334] "Starting Device Plugin manager" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.253927 4631 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.253958 4631 server.go:79] "Starting device plugin registration server" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.254509 4631 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.254535 4631 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.254713 4631 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.254784 4631 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.254791 4631 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.265395 4631 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.316021 4631 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.316265 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.318181 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.318264 4631 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.318284 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.318753 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.318810 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.319132 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.320093 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.320126 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.320138 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.320572 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.320603 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.320619 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.320764 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.320902 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.320939 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.321883 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.321910 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.321926 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.321926 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.322078 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.322092 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.322080 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.322171 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.322212 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.322969 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.322988 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.322997 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.323134 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.323621 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.323665 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324047 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324081 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324098 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324242 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324262 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324280 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324466 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324494 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324926 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324955 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.324972 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.325543 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.325755 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.325979 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.340782 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="400ms" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.355168 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.357820 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.357866 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc 
kubenswrapper[4631]: I1129 04:11:13.357884 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.357917 4631 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.358465 4631 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.190:6443: connect: connection refused" node="crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363309 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363379 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363415 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363448 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363477 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363547 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363593 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363632 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: 
\"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363660 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363714 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363756 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363794 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363823 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363850 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.363879 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465494 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465560 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 
04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465588 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465612 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465633 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465654 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465674 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465692 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465689 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465773 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465780 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465828 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 
04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465842 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465869 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465852 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465876 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465713 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466069 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466138 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466168 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466213 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466231 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: 
\"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.465894 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466205 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466288 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466307 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466311 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466292 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466322 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.466438 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.559265 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.561045 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.561089 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 
04:11:13.561107 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.561165 4631 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.561883 4631 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.190:6443: connect: connection refused" node="crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.653822 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.659672 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.679179 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.695428 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.697451 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-e95ef3ae842369fe6b5748252d7ef7833d9f18dbec99013c2d2e579e9766d231 WatchSource:0}: Error finding container e95ef3ae842369fe6b5748252d7ef7833d9f18dbec99013c2d2e579e9766d231: Status 404 returned error can't find the container with id e95ef3ae842369fe6b5748252d7ef7833d9f18dbec99013c2d2e579e9766d231 Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.698028 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-8c5aa35a18abd4b9eba3c9744690378045b0c54a54929da23f9fc12f3ef496b3 WatchSource:0}: Error finding container 8c5aa35a18abd4b9eba3c9744690378045b0c54a54929da23f9fc12f3ef496b3: Status 404 returned error can't find the container with id 8c5aa35a18abd4b9eba3c9744690378045b0c54a54929da23f9fc12f3ef496b3 Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.701512 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.711234 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-e8861fea94be8bc170a9f91a810358c29423ef0e5b87821059880386cbe5f628 WatchSource:0}: Error finding container e8861fea94be8bc170a9f91a810358c29423ef0e5b87821059880386cbe5f628: Status 404 returned error can't find the container with id e8861fea94be8bc170a9f91a810358c29423ef0e5b87821059880386cbe5f628 Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.724170 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-442fa2e0f6598a132e8c073b7705818f1a75fc53a48f5b6ed2112cdd1225d12c WatchSource:0}: Error finding container 442fa2e0f6598a132e8c073b7705818f1a75fc53a48f5b6ed2112cdd1225d12c: Status 404 returned error can't find the container with id 442fa2e0f6598a132e8c073b7705818f1a75fc53a48f5b6ed2112cdd1225d12c Nov 29 04:11:13 crc kubenswrapper[4631]: W1129 04:11:13.728015 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-dca82b62c023358f2de7625dad6b9a20f177e3293f56cade95aaf8504a16f079 WatchSource:0}: Error finding container dca82b62c023358f2de7625dad6b9a20f177e3293f56cade95aaf8504a16f079: Status 404 returned error can't find the container with id dca82b62c023358f2de7625dad6b9a20f177e3293f56cade95aaf8504a16f079 Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.741750 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="800ms" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.962764 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.964822 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.964890 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.964902 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:13 crc kubenswrapper[4631]: I1129 04:11:13.964931 4631 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 04:11:13 crc kubenswrapper[4631]: E1129 04:11:13.965591 4631 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.190:6443: connect: connection refused" node="crc" Nov 29 04:11:14 crc kubenswrapper[4631]: W1129 04:11:14.121466 4631 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:14 crc kubenswrapper[4631]: E1129 04:11:14.121549 4631 reflector.go:158] "Unhandled Error" 
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.190:6443: connect: connection refused" logger="UnhandledError" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.125968 4631 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.220419 4631 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b" exitCode=0 Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.220514 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.220650 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e95ef3ae842369fe6b5748252d7ef7833d9f18dbec99013c2d2e579e9766d231"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.220752 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.221936 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.221978 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.221990 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.224399 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.224431 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"dca82b62c023358f2de7625dad6b9a20f177e3293f56cade95aaf8504a16f079"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.227048 4631 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4" exitCode=0 Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.227126 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.227155 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"442fa2e0f6598a132e8c073b7705818f1a75fc53a48f5b6ed2112cdd1225d12c"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.227243 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.228282 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.228311 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.228323 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.233056 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.233958 4631 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ee7f00ee91110b84305d8aca62531f4ff151324aa48d2d5018b4251db56af872" exitCode=0 Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.234031 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ee7f00ee91110b84305d8aca62531f4ff151324aa48d2d5018b4251db56af872"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.234056 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e8861fea94be8bc170a9f91a810358c29423ef0e5b87821059880386cbe5f628"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.234139 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.234921 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.234976 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.234990 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.235085 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.235119 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.235130 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.236902 4631 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c" exitCode=0 Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.236945 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.236977 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"8c5aa35a18abd4b9eba3c9744690378045b0c54a54929da23f9fc12f3ef496b3"} Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.237052 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.238117 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.238148 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.238159 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:14 crc kubenswrapper[4631]: E1129 04:11:14.543082 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="1.6s" Nov 29 04:11:14 crc kubenswrapper[4631]: W1129 04:11:14.600090 4631 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:14 crc kubenswrapper[4631]: E1129 04:11:14.600179 4631 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.190:6443: connect: connection refused" logger="UnhandledError" Nov 29 04:11:14 crc kubenswrapper[4631]: W1129 04:11:14.603512 4631 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:14 crc kubenswrapper[4631]: E1129 04:11:14.603561 4631 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.190:6443: connect: connection refused" logger="UnhandledError" Nov 29 04:11:14 crc kubenswrapper[4631]: W1129 04:11:14.687832 4631 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.190:6443: connect: connection refused Nov 29 04:11:14 crc kubenswrapper[4631]: E1129 04:11:14.687889 4631 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get 
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.190:6443: connect: connection refused" logger="UnhandledError" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.765836 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.766913 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.766951 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.766959 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:14 crc kubenswrapper[4631]: I1129 04:11:14.766982 4631 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 04:11:14 crc kubenswrapper[4631]: E1129 04:11:14.767342 4631 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.190:6443: connect: connection refused" node="crc" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.244658 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.244795 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.245992 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.246031 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.246048 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.249970 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.250064 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.250225 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.250092 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.251413 4631 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.251447 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.251464 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.255092 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.255158 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.255188 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.255162 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.256392 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.256428 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.256444 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.259288 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.259355 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.259377 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.259395 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.261251 4631 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" 
containerID="a688ab04e6e05182db2c2f3bac369094bf7ecf634c117b01efc61fb757f5a938" exitCode=0 Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.261410 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a688ab04e6e05182db2c2f3bac369094bf7ecf634c117b01efc61fb757f5a938"} Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.262565 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.263577 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.263611 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:15 crc kubenswrapper[4631]: I1129 04:11:15.263627 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.270806 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83"} Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.270992 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.272429 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.272470 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.272486 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.276316 4631 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="9022eec0ea1f259acc9309571973dbc1ea222f0ebf9cda5229aaf33e7ed7b442" exitCode=0 Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.276462 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.277021 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"9022eec0ea1f259acc9309571973dbc1ea222f0ebf9cda5229aaf33e7ed7b442"} Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.277070 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.277176 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.277774 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.278773 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.278816 4631 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.278833 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.279591 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.279626 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.279641 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.280479 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.280511 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.280526 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.367658 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.368978 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.369036 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.369054 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.369088 4631 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 04:11:16 crc kubenswrapper[4631]: I1129 04:11:16.780955 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.283084 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"52f43cf26605dac1b088b7580b5447cfab48e84fd32e325e4ef6415215e09661"} Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.283512 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6100415a919b68a8cdc6dfbae9d3c391ac6db8e3908f39f789d0d1694a3ddc8b"} Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.283531 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ecbe6fce889ea30339f989f5b40fab5157d4c50a77df9ddb8adcb56223755f73"} Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.283195 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.283150 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 
04:11:17.283654 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.284524 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.284566 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.284578 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.285099 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.285130 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:17 crc kubenswrapper[4631]: I1129 04:11:17.285142 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.291609 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"62eada265bec693d8dc6f513b807347f2870d5becfb53cdfc0a81c113fcf151b"} Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.291666 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e827ca3b15395b1539d10999b148ca19dcc72e36e2ce2539e6c66f286ea8148f"} Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.291764 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.291803 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.291853 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.293120 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.293165 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.293182 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.293800 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.293863 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.293887 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.564275 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:18 crc kubenswrapper[4631]: I1129 04:11:18.819815 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.294127 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.294588 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.295670 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.295722 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.295752 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.295778 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.295826 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.295869 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.537471 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.582590 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.582740 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.583980 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.584049 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:19 crc kubenswrapper[4631]: I1129 04:11:19.584072 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:20 crc kubenswrapper[4631]: I1129 04:11:20.297814 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:20 crc kubenswrapper[4631]: I1129 04:11:20.297987 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:20 crc kubenswrapper[4631]: I1129 04:11:20.299033 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:20 crc kubenswrapper[4631]: I1129 04:11:20.299090 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:20 crc kubenswrapper[4631]: I1129 04:11:20.299113 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:20 crc kubenswrapper[4631]: I1129 04:11:20.299400 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:20 crc kubenswrapper[4631]: I1129 
04:11:20.299459 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:20 crc kubenswrapper[4631]: I1129 04:11:20.299488 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.054886 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.185039 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.185325 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.187317 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.187373 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.187387 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.303205 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.306559 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.306676 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.306699 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.652065 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.652360 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.654117 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.654186 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.654206 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.667903 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:21 crc kubenswrapper[4631]: I1129 04:11:21.706740 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:22 crc kubenswrapper[4631]: I1129 04:11:22.306445 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:22 crc kubenswrapper[4631]: I1129 04:11:22.307902 4631 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:22 crc kubenswrapper[4631]: I1129 04:11:22.307955 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:22 crc kubenswrapper[4631]: I1129 04:11:22.307978 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:23 crc kubenswrapper[4631]: E1129 04:11:23.265875 4631 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 29 04:11:23 crc kubenswrapper[4631]: I1129 04:11:23.308708 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:23 crc kubenswrapper[4631]: I1129 04:11:23.310292 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:23 crc kubenswrapper[4631]: I1129 04:11:23.310442 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:23 crc kubenswrapper[4631]: I1129 04:11:23.310462 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:24 crc kubenswrapper[4631]: I1129 04:11:24.185952 4631 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 29 04:11:24 crc kubenswrapper[4631]: I1129 04:11:24.186075 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 04:11:25 crc kubenswrapper[4631]: I1129 04:11:25.126870 4631 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 29 04:11:26 crc kubenswrapper[4631]: E1129 04:11:26.037085 4631 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": net/http: TLS handshake timeout" event="&Event{ObjectMeta:{crc.187c5ee0af034a96 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-29 04:11:13.124305558 +0000 UTC m=+0.188809072,LastTimestamp:2025-11-29 04:11:13.124305558 +0000 UTC m=+0.188809072,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 29 04:11:26 crc kubenswrapper[4631]: E1129 04:11:26.144893 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for 
connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s" Nov 29 04:11:26 crc kubenswrapper[4631]: W1129 04:11:26.262051 4631 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 29 04:11:26 crc kubenswrapper[4631]: I1129 04:11:26.262178 4631 trace.go:236] Trace[354260442]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Nov-2025 04:11:16.260) (total time: 10001ms): Nov 29 04:11:26 crc kubenswrapper[4631]: Trace[354260442]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (04:11:26.262) Nov 29 04:11:26 crc kubenswrapper[4631]: Trace[354260442]: [10.001824182s] [10.001824182s] END Nov 29 04:11:26 crc kubenswrapper[4631]: E1129 04:11:26.262211 4631 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 29 04:11:26 crc kubenswrapper[4631]: E1129 04:11:26.370052 4631 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Nov 29 04:11:26 crc kubenswrapper[4631]: I1129 04:11:26.496917 4631 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 29 04:11:26 crc kubenswrapper[4631]: I1129 04:11:26.496973 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 29 04:11:26 crc kubenswrapper[4631]: I1129 04:11:26.506548 4631 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 29 04:11:26 crc kubenswrapper[4631]: I1129 04:11:26.506608 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 29 04:11:28 crc kubenswrapper[4631]: I1129 04:11:28.842739 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 29 04:11:28 crc kubenswrapper[4631]: I1129 04:11:28.842887 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:28 crc kubenswrapper[4631]: I1129 04:11:28.843870 4631 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:28 crc kubenswrapper[4631]: I1129 04:11:28.843905 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:28 crc kubenswrapper[4631]: I1129 04:11:28.843914 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:28 crc kubenswrapper[4631]: I1129 04:11:28.861463 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.325128 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.326533 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.326579 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.326593 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.545063 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.545211 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.546383 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.546448 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.546470 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.552729 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.570698 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.571641 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.571678 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.571690 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.571713 4631 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 04:11:29 crc kubenswrapper[4631]: E1129 04:11:29.574658 4631 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.588922 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.589078 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.590085 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.590119 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:29 crc kubenswrapper[4631]: I1129 04:11:29.590131 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:30 crc kubenswrapper[4631]: I1129 04:11:30.327877 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:11:30 crc kubenswrapper[4631]: I1129 04:11:30.327933 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:30 crc kubenswrapper[4631]: I1129 04:11:30.329035 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:30 crc kubenswrapper[4631]: I1129 04:11:30.329064 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:30 crc kubenswrapper[4631]: I1129 04:11:30.329072 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.460084 4631 trace.go:236] Trace[653259707]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Nov-2025 04:11:16.821) (total time: 14638ms): Nov 29 04:11:31 crc kubenswrapper[4631]: Trace[653259707]: ---"Objects listed" error: 14638ms (04:11:31.460) Nov 29 04:11:31 crc kubenswrapper[4631]: Trace[653259707]: [14.638669607s] [14.638669607s] END Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.460111 4631 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.460741 4631 trace.go:236] Trace[329216795]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Nov-2025 04:11:16.967) (total time: 14493ms): Nov 29 04:11:31 crc kubenswrapper[4631]: Trace[329216795]: ---"Objects listed" error: 14493ms (04:11:31.460) Nov 29 04:11:31 crc kubenswrapper[4631]: Trace[329216795]: [14.493344159s] [14.493344159s] END Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.460777 4631 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.462179 4631 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.498959 4631 trace.go:236] Trace[1893615326]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Nov-2025 04:11:17.739) (total time: 13759ms): Nov 29 04:11:31 crc kubenswrapper[4631]: Trace[1893615326]: ---"Objects listed" error: 13759ms (04:11:31.498) Nov 29 04:11:31 crc kubenswrapper[4631]: Trace[1893615326]: [13.759902872s] [13.759902872s] END Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.498993 4631 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 
04:11:31.633047 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.633153 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.633962 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.633987 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.633995 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.636899 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.827655 4631 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:40154->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.827720 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:40154->192.168.126.11:17697: read: connection reset by peer" Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.829526 4631 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 29 04:11:31 crc kubenswrapper[4631]: I1129 04:11:31.829651 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 29 04:11:32 crc kubenswrapper[4631]: I1129 04:11:32.298856 4631 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 29 04:11:32 crc kubenswrapper[4631]: I1129 04:11:32.333216 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 29 04:11:32 crc kubenswrapper[4631]: I1129 04:11:32.337361 4631 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83" exitCode=255 Nov 29 04:11:32 crc kubenswrapper[4631]: I1129 04:11:32.337439 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83"} Nov 29 04:11:32 crc kubenswrapper[4631]: I1129 04:11:32.408781 4631 scope.go:117] "RemoveContainer" containerID="8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83" Nov 29 04:11:32 crc kubenswrapper[4631]: I1129 04:11:32.668902 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.130114 4631 apiserver.go:52] "Watching apiserver" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.133355 4631 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.133957 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-multus/multus-additional-cni-plugins-kcgbc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-ovn-kubernetes/ovnkube-node-2npl6","openshift-dns/node-resolver-jgxpj","openshift-machine-config-operator/machine-config-daemon-6bmtd","openshift-multus/multus-pbk6b","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.134408 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.134463 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.134528 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.134726 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.134923 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.135170 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.135387 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.135431 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.136009 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-jgxpj" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.136119 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.136157 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.136199 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.136215 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.136281 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.140585 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.140841 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.141866 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.141959 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.142498 4631 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.143361 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145072 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145081 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145288 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145447 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 29 
04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145480 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145645 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145684 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145722 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145801 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.145973 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.146214 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.146239 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.146264 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.146354 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.146405 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.146454 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.146490 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.148963 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.149926 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.150634 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.150728 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.153724 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.154746 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.156157 4631 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.163170 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.163550 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172470 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172517 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172541 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172560 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172575 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172591 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172606 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172621 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172637 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172652 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172667 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172681 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172695 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172712 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172742 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172758 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172773 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172791 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172818 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: 
\"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172836 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172851 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172867 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172883 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172900 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172938 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172954 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172970 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.172985 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173005 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173029 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173051 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173088 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173105 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173122 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173154 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173170 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173197 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173223 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173237 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173252 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173268 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173282 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173283 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173297 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173368 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173388 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173407 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173425 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 29 04:11:33 crc 
kubenswrapper[4631]: I1129 04:11:33.173442 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173463 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173480 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173497 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173513 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173529 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173545 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173561 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173576 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173593 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: 
\"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173609 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173624 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173643 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173658 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173675 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173693 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.173868 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174009 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174045 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174122 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174154 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174356 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174380 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174397 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174415 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174431 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174446 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174465 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174484 4631 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174756 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174880 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174900 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174917 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174933 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174949 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174949 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174999 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.175154 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.175178 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.175272 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.175292 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.175462 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.175559 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.175761 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.175987 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.176081 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.176221 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.176295 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.176390 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.176521 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.176524 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.176905 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). 
InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.176943 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.176998 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.177207 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.177377 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.177609 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.177798 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178007 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178078 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178255 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178306 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178319 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.174965 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178376 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178398 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178479 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178488 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178496 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178817 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178912 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.179125 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.179397 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.180082 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.180304 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.180802 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.180798 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.180832 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.181124 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.181217 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.181466 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.181520 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.181613 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.181806 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.182275 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.182371 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.182557 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.182585 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.182747 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.182932 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.183153 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.183801 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.183883 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.183967 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.184229 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.184706 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.185008 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.185078 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.185089 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.185411 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.185577 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.185663 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.185979 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.185995 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186035 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186096 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186449 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186502 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.178498 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186553 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186575 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186585 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186670 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186693 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186711 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186728 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186746 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186764 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186781 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186799 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186819 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186837 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod 
\"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186856 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186873 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186890 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186908 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186926 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186943 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186958 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186974 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187000 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187015 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") 
pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187031 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187046 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187061 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187099 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187115 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187130 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187146 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187161 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187177 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187193 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod 
\"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187208 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187225 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187245 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187267 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187293 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187317 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187352 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187437 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187455 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187473 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" 
(UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187488 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187504 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187520 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187536 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187553 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187571 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187587 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187603 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187619 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187634 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187650 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187666 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187683 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187699 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187714 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187731 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187747 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187763 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187781 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187807 4631 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187826 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187843 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187859 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187876 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187892 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187908 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187924 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187940 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187956 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 
04:11:33.187973 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187992 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188012 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188033 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188051 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188079 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188100 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188119 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188139 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188159 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 
04:11:33.188184 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188205 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188227 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188250 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188271 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188289 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188311 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188372 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188395 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188415 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 29 04:11:33 
crc kubenswrapper[4631]: I1129 04:11:33.188431 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188450 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188465 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188481 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188495 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188510 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188526 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188542 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188557 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188574 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188590 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188607 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188624 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188640 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188656 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188672 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188686 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188702 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188749 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188767 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188782 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188796 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188811 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188863 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zg8w5\" (UniqueName: \"kubernetes.io/projected/d99f974e-ba9c-4600-81c3-42c629af0c1b-kube-api-access-zg8w5\") pod \"node-resolver-jgxpj\" (UID: \"d99f974e-ba9c-4600-81c3-42c629af0c1b\") " pod="openshift-dns/node-resolver-jgxpj"
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188883 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7f871e13-bbe2-4104-8f40-70e695653fef-cni-binary-copy\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b"
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188924 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-openvswitch\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6"
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188941 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-system-cni-dir\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc"
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188956 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/83a25be5-2626-40c4-9f04-e74d576e22d7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc"
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188981 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName:
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189004 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-var-lib-cni-multus\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189021 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-etc-kubernetes\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189035 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-bin\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189051 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-cnibin\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189069 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-os-release\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189086 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-multus-cni-dir\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189101 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-var-lib-cni-bin\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189116 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-kubelet\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189131 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-etc-openvswitch\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189149 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189164 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-system-cni-dir\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189185 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189227 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-netns\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189243 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-systemd\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189257 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-netd\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189277 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189293 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-rootfs\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " 
pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189308 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-run-k8s-cni-cncf-io\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189344 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189362 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189377 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-script-lib\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189394 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d99f974e-ba9c-4600-81c3-42c629af0c1b-hosts-file\") pod \"node-resolver-jgxpj\" (UID: \"d99f974e-ba9c-4600-81c3-42c629af0c1b\") " pod="openshift-dns/node-resolver-jgxpj" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189409 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-os-release\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189424 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6w8m\" (UniqueName: \"kubernetes.io/projected/7f871e13-bbe2-4104-8f40-70e695653fef-kube-api-access-x6w8m\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189441 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-multus-conf-dir\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189456 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-ovn-kubernetes\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189472 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c5wb\" (UniqueName: \"kubernetes.io/projected/83a25be5-2626-40c4-9f04-e74d576e22d7-kube-api-access-8c5wb\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189489 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189505 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-slash\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189522 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189542 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189558 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-proxy-tls\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189573 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-var-lib-kubelet\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189589 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fd5s\" (UniqueName: \"kubernetes.io/projected/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-kube-api-access-9fd5s\") pod \"machine-config-daemon-6bmtd\" (UID: 
\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189604 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffhfp\" (UniqueName: \"kubernetes.io/projected/cda25410-78a0-47a1-894f-621a855bd64a-kube-api-access-ffhfp\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189619 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/83a25be5-2626-40c4-9f04-e74d576e22d7-cni-binary-copy\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189638 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-cnibin\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189654 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/7f871e13-bbe2-4104-8f40-70e695653fef-multus-daemon-config\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189669 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189684 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-run-multus-certs\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189702 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189717 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-log-socket\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189733 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-config\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189752 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189767 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189781 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-multus-socket-dir-parent\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189796 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-run-netns\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189810 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-node-log\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189825 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cda25410-78a0-47a1-894f-621a855bd64a-ovn-node-metrics-cert\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189842 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189890 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-systemd-units\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189907 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-var-lib-openvswitch\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189924 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-ovn\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189939 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-env-overrides\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.186780 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187025 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187191 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187272 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187532 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.187817 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188223 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.188565 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189039 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189941 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.190192 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.190424 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.190967 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.191598 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.191925 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.191933 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.191984 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.192404 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.192424 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.192468 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.192526 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.192653 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.192875 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.193056 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.193053 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.193301 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.193549 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.194060 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.194135 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.194454 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.194663 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.195144 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.195436 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.195740 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.195995 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.196227 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.196442 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.196630 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.197470 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.197980 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.198362 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.198486 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.198756 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.199006 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.199103 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.199177 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.199445 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.199482 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.199541 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:11:33.699523338 +0000 UTC m=+20.764026852 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.199787 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.199845 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.199969 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.200057 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.200176 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.200375 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.200384 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.200580 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.200593 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.201002 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.201174 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.201469 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.201629 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.204375 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.204578 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.204768 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.205010 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.205158 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.205208 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.205295 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.205316 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.205557 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.205628 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.205862 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.205999 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.189959 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.206352 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-mcd-auth-proxy-config\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.206398 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-hostroot\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.206458 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.206904 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.206920 4631 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.206993 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-29 04:11:33.706971063 +0000 UTC m=+20.771474617 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207149 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207316 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207445 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207471 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207491 4631 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207497 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207510 4631 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207539 4631 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207561 4631 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207583 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207605 4631 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207627 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207649 4631 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207672 4631 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207711 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207732 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207750 4631 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207768 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207786 4631 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" 
DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207805 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207823 4631 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207842 4631 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207860 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207878 4631 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207895 4631 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207915 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207933 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207950 4631 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207971 4631 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207989 4631 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208007 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208026 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc 
kubenswrapper[4631]: I1129 04:11:33.208044 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208067 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208085 4631 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208103 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208122 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208140 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208158 4631 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208176 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208194 4631 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208212 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208229 4631 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208247 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208267 4631 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" 
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208284 4631 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208301 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208319 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208361 4631 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208381 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208400 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208418 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208436 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208454 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208473 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208491 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208509 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208527 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208545 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208564 4631 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208583 4631 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208600 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208618 4631 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208636 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208655 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208677 4631 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208696 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208714 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208732 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208750 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208768 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208785 4631 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208803 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208820 4631 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208838 4631 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208857 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208876 4631 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208912 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208930 4631 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208948 4631 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208967 4631 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208985 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209004 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209022 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209039 4631 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209057 4631 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209076 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209093 4631 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209112 4631 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209129 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209147 4631 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209167 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209185 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209203 4631 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209223 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209253 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209272 4631 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209293 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209312 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209368 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209389 4631 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209406 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209424 4631 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209442 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209460 4631 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209477 4631 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209495 4631 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209513 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209531 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209550 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209571 4631 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209590 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209607 4631 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209625 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209643 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209661 4631 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209678 4631 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209696 4631 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209714 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209731 4631 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209749 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209766 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209788 4631 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209806 4631 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209824 4631 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209841 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209859 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209878 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209896 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209918 4631 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209935 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209953 4631 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209971 4631 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.209988 4631 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210006 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210024 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210042 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: 
\"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210060 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210079 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210078 4631 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.212006 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.207947 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.208104 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210193 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210259 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210744 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210775 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.211124 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.211157 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.211518 4631 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.212415 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:33.712325245 +0000 UTC m=+20.776828799 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.211913 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.216640 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.210097 4631 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217426 4631 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217445 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217456 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217469 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217479 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217490 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217501 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217511 4631 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217520 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.217532 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 
04:11:33.217543 4631 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.220027 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.232027 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.232237 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.232252 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.232262 4631 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.232348 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:33.73231261 +0000 UTC m=+20.796816124 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.246141 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.246315 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.246347 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.246919 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.247496 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.247804 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.247827 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.247839 4631 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.247882 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:33.747867905 +0000 UTC m=+20.812371419 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.248374 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.248751 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.249014 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.250158 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.252239 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.254095 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.255310 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.256027 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.256163 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.256643 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.257203 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.258262 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.259173 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.261267 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.261725 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.264873 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.265240 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.267230 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.267576 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.268120 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.269509 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.269697 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.269903 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.271997 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.277639 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.280212 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.282535 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.284701 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.292575 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.292935 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.294393 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.294693 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.299834 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.301964 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.307870 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.308509 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.312618 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29
T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.314405 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.314396 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.316775 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320581 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-config\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320610 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-run-multus-certs\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320627 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-log-socket\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320643 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320658 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-multus-socket-dir-parent\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320672 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cda25410-78a0-47a1-894f-621a855bd64a-ovn-node-metrics-cert\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320686 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-run-netns\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320700 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-node-log\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320714 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-ovn\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320727 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-systemd-units\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320741 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-var-lib-openvswitch\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320754 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-hostroot\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320768 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-env-overrides\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320808 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-mcd-auth-proxy-config\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320822 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-openvswitch\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320837 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-system-cni-dir\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320853 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zg8w5\" (UniqueName: \"kubernetes.io/projected/d99f974e-ba9c-4600-81c3-42c629af0c1b-kube-api-access-zg8w5\") pod \"node-resolver-jgxpj\" (UID: \"d99f974e-ba9c-4600-81c3-42c629af0c1b\") " pod="openshift-dns/node-resolver-jgxpj" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320867 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/7f871e13-bbe2-4104-8f40-70e695653fef-cni-binary-copy\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320879 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-etc-kubernetes\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320892 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-bin\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320906 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/83a25be5-2626-40c4-9f04-e74d576e22d7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320929 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-var-lib-cni-multus\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320943 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-kubelet\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320957 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-etc-openvswitch\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320971 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-cnibin\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320984 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-os-release\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.320997 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-multus-cni-dir\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321010 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-var-lib-cni-bin\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321023 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-system-cni-dir\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321038 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321052 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-run-k8s-cni-cncf-io\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321065 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-netns\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321079 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-systemd\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321093 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-netd\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321116 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-rootfs\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321130 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-os-release\") pod 
\"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321144 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6w8m\" (UniqueName: \"kubernetes.io/projected/7f871e13-bbe2-4104-8f40-70e695653fef-kube-api-access-x6w8m\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321163 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-script-lib\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321178 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d99f974e-ba9c-4600-81c3-42c629af0c1b-hosts-file\") pod \"node-resolver-jgxpj\" (UID: \"d99f974e-ba9c-4600-81c3-42c629af0c1b\") " pod="openshift-dns/node-resolver-jgxpj" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321192 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-ovn-kubernetes\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321208 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c5wb\" (UniqueName: \"kubernetes.io/projected/83a25be5-2626-40c4-9f04-e74d576e22d7-kube-api-access-8c5wb\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321225 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-multus-conf-dir\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321244 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-proxy-tls\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321258 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-var-lib-kubelet\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321277 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-slash\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321292 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321312 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffhfp\" (UniqueName: \"kubernetes.io/projected/cda25410-78a0-47a1-894f-621a855bd64a-kube-api-access-ffhfp\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321339 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/83a25be5-2626-40c4-9f04-e74d576e22d7-cni-binary-copy\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321355 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fd5s\" (UniqueName: \"kubernetes.io/projected/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-kube-api-access-9fd5s\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321368 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-cnibin\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321382 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/7f871e13-bbe2-4104-8f40-70e695653fef-multus-daemon-config\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321396 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321428 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321442 4631 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321454 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321465 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321476 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321487 4631 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321497 4631 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321508 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321518 4631 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321528 4631 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321539 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321548 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321559 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321569 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321579 4631 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321589 4631 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321599 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321609 4631 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321619 4631 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321629 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321639 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321649 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321659 4631 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321669 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321679 4631 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321689 4631 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321699 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321711 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321722 4631 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321732 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321742 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321753 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321762 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321771 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321780 4631 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321788 4631 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321795 4631 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321804 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321813 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321822 4631 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321831 4631 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321839 
4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321847 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321857 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.321909 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.322451 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-config\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.322486 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-run-multus-certs\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.322506 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-log-socket\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.322525 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.322560 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-multus-socket-dir-parent\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.323211 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.327186 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-run-netns\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.327241 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-node-log\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.327273 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-ovn\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.327275 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cda25410-78a0-47a1-894f-621a855bd64a-ovn-node-metrics-cert\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.327303 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-systemd-units\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.327346 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-var-lib-openvswitch\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.327382 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-hostroot\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.327848 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.328180 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-netns\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.328217 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-systemd\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.328243 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" 
(UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-netd\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.328188 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-run-k8s-cni-cncf-io\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.328479 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-rootfs\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.328530 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-os-release\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.328765 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-var-lib-cni-multus\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.329373 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-script-lib\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.329410 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.329487 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d99f974e-ba9c-4600-81c3-42c629af0c1b-hosts-file\") pod \"node-resolver-jgxpj\" (UID: \"d99f974e-ba9c-4600-81c3-42c629af0c1b\") " pod="openshift-dns/node-resolver-jgxpj" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.329515 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-ovn-kubernetes\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.329662 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-multus-conf-dir\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.329986 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-mcd-auth-proxy-config\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.330081 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-slash\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.330023 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-var-lib-kubelet\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.330351 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-openvswitch\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.330382 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-system-cni-dir\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.330808 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.330907 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.332428 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-etc-kubernetes\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.331893 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-kubelet\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.332623 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-bin\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.332741 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.332805 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.332823 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-env-overrides\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.332984 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-multus-cni-dir\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.333015 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-etc-openvswitch\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.333037 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-cnibin\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.333071 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/83a25be5-2626-40c4-9f04-e74d576e22d7-os-release\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.333099 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-cnibin\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.333129 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-system-cni-dir\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.333150 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/7f871e13-bbe2-4104-8f40-70e695653fef-host-var-lib-cni-bin\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.333498 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.336471 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.337308 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-proxy-tls\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.341121 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/83a25be5-2626-40c4-9f04-e74d576e22d7-cni-binary-copy\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.341913 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7f871e13-bbe2-4104-8f40-70e695653fef-cni-binary-copy\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.342044 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/7f871e13-bbe2-4104-8f40-70e695653fef-multus-daemon-config\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.342510 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/83a25be5-2626-40c4-9f04-e74d576e22d7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.342626 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.343268 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.343982 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" 
path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.344592 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.345661 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.349155 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.349593 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.350394 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.350875 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.352737 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.353816 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8c5wb\" (UniqueName: \"kubernetes.io/projected/83a25be5-2626-40c4-9f04-e74d576e22d7-kube-api-access-8c5wb\") pod \"multus-additional-cni-plugins-kcgbc\" (UID: \"83a25be5-2626-40c4-9f04-e74d576e22d7\") " pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.354147 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.355254 4631 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.356607 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6w8m\" (UniqueName: \"kubernetes.io/projected/7f871e13-bbe2-4104-8f40-70e695653fef-kube-api-access-x6w8m\") pod \"multus-pbk6b\" (UID: \"7f871e13-bbe2-4104-8f40-70e695653fef\") " pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.356990 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 29 04:11:33 crc 
kubenswrapper[4631]: I1129 04:11:33.357373 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zg8w5\" (UniqueName: \"kubernetes.io/projected/d99f974e-ba9c-4600-81c3-42c629af0c1b-kube-api-access-zg8w5\") pod \"node-resolver-jgxpj\" (UID: \"d99f974e-ba9c-4600-81c3-42c629af0c1b\") " pod="openshift-dns/node-resolver-jgxpj" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.357660 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.358916 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.359906 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.360857 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffhfp\" (UniqueName: \"kubernetes.io/projected/cda25410-78a0-47a1-894f-621a855bd64a-kube-api-access-ffhfp\") pod \"ovnkube-node-2npl6\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.360878 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.362308 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.363129 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.363558 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.363742 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.364673 4631 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.364886 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.366957 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.368061 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.368629 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.370494 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.370816 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fd5s\" (UniqueName: \"kubernetes.io/projected/cddaf389-3216-4be7-a91d-8bed4a7bb9e9-kube-api-access-9fd5s\") pod \"machine-config-daemon-6bmtd\" (UID: \"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\") " pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: 
I1129 04:11:33.371811 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.372389 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.373065 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.374102 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.374817 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.375804 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.376935 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.377606 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.378463 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.379023 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.379430 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.380017 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.380831 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.381796 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.382516 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 
04:11:33.383033 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.383965 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.384746 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.385618 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.386111 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe"} Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.386192 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.393824 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.403046 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.411544 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.423663 4631 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.423700 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.431048 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.441732 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.448487 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.450632 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.467817 4631 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.471231 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 29 04:11:33 crc kubenswrapper[4631]: W1129 04:11:33.482386 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-d6354dfb7ebac8a17cd37b7c5e54569b1de5d3b35409c7237132be83edab98cd WatchSource:0}: Error finding container d6354dfb7ebac8a17cd37b7c5e54569b1de5d3b35409c7237132be83edab98cd: Status 404 returned error can't find the container with id d6354dfb7ebac8a17cd37b7c5e54569b1de5d3b35409c7237132be83edab98cd Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.482611 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.493172 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.499252 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.509933 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\
"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: W1129 04:11:33.512115 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcda25410_78a0_47a1_894f_621a855bd64a.slice/crio-6f5d544f678345df368ed32c558318be9c0cb311477d41759e7682ddc7cda8f3 WatchSource:0}: Error finding container 6f5d544f678345df368ed32c558318be9c0cb311477d41759e7682ddc7cda8f3: Status 404 returned error can't find the container with id 6f5d544f678345df368ed32c558318be9c0cb311477d41759e7682ddc7cda8f3 Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.520744 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.521146 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.528438 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.543316 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: W1129 04:11:33.545778 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83a25be5_2626_40c4_9f04_e74d576e22d7.slice/crio-6baf82ed19cbf6f35dba6c95d4c9f3def1656f6d852b45c0a6e5bb12f9f3d401 WatchSource:0}: Error finding container 6baf82ed19cbf6f35dba6c95d4c9f3def1656f6d852b45c0a6e5bb12f9f3d401: Status 404 returned error can't find the container with id 
6baf82ed19cbf6f35dba6c95d4c9f3def1656f6d852b45c0a6e5bb12f9f3d401 Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.547574 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-jgxpj" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.549197 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.559897 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.566289 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.571797 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.572985 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-a
piserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.586175 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: W1129 04:11:33.591206 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcddaf389_3216_4be7_a91d_8bed4a7bb9e9.slice/crio-7eb20931b7f2d90c99dfbbf40249b57d26a7609fbc1a7f6dc22644b1a374b7c6 WatchSource:0}: Error finding container 7eb20931b7f2d90c99dfbbf40249b57d26a7609fbc1a7f6dc22644b1a374b7c6: Status 404 returned error can't find the container with id 7eb20931b7f2d90c99dfbbf40249b57d26a7609fbc1a7f6dc22644b1a374b7c6 Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.600766 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-pbk6b" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.604820 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.621101 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.634180 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.646771 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.663799 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.673828 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.681611 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.690381 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.698860 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.707192 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.716205 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.729538 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.729648 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.729676 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.729767 4631 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.729818 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:34.729804677 +0000 UTC m=+21.794308181 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.729863 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:11:34.729857938 +0000 UTC m=+21.794361452 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.729891 4631 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.729910 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:34.729904779 +0000 UTC m=+21.794408283 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.734190 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.745450 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.759032 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.770384 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.830784 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:33 crc kubenswrapper[4631]: I1129 04:11:33.830830 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.830933 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.830948 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.830957 4631 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.830996 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:34.830984202 +0000 UTC m=+21.895487716 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.831043 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.831052 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.831059 4631 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:33 crc kubenswrapper[4631]: E1129 04:11:33.831078 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:34.831072694 +0000 UTC m=+21.895576208 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.352815 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-pbk6b" event={"ID":"7f871e13-bbe2-4104-8f40-70e695653fef","Type":"ContainerStarted","Data":"323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.353049 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-pbk6b" event={"ID":"7f871e13-bbe2-4104-8f40-70e695653fef","Type":"ContainerStarted","Data":"687cc251c5035fdcdc56a0f0859b71bc8a96b656284caf1587da07c69ec5dcf4"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.355385 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.355421 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"43d8adf635b56f91b53cd2952890961c023d2adf1627430d671e32ad5070afb3"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.358086 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.358107 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.358116 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"7eb20931b7f2d90c99dfbbf40249b57d26a7609fbc1a7f6dc22644b1a374b7c6"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.359527 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"a6c6437e2cac44bc5a764280e73c5ee1e4a3035be7f633fdbde08f6ab3118cbb"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.361163 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-jgxpj" event={"ID":"d99f974e-ba9c-4600-81c3-42c629af0c1b","Type":"ContainerStarted","Data":"9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.361181 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-jgxpj" 
event={"ID":"d99f974e-ba9c-4600-81c3-42c629af0c1b","Type":"ContainerStarted","Data":"9cc69a3e7814445a8112eb7c49ee3ada6a6444fa183ae318fcbff2985bc5e8bd"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.362986 4631 generic.go:334] "Generic (PLEG): container finished" podID="83a25be5-2626-40c4-9f04-e74d576e22d7" containerID="3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c" exitCode=0 Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.363023 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" event={"ID":"83a25be5-2626-40c4-9f04-e74d576e22d7","Type":"ContainerDied","Data":"3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.363037 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" event={"ID":"83a25be5-2626-40c4-9f04-e74d576e22d7","Type":"ContainerStarted","Data":"6baf82ed19cbf6f35dba6c95d4c9f3def1656f6d852b45c0a6e5bb12f9f3d401"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.366310 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.367744 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54" exitCode=0 Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.367822 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.367863 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"6f5d544f678345df368ed32c558318be9c0cb311477d41759e7682ddc7cda8f3"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.370404 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.370433 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.370454 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d6354dfb7ebac8a17cd37b7c5e54569b1de5d3b35409c7237132be83edab98cd"} Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.378465 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.390693 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.413565 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.425078 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.439029 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.458765 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.472463 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.484301 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.500680 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.511920 4631 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.523721 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.537812 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.549305 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.561971 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.573705 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.586219 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.598209 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.617714 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.627503 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\
" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.644483 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.662715 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.674735 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.688824 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.703792 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc 
kubenswrapper[4631]: I1129 04:11:34.715543 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.733313 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-5fvhl"] Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.733662 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.737047 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.737105 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.739738 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.739859 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.739904 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.740034 4631 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.740085 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:36.740069719 +0000 UTC m=+23.804573253 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.740143 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:11:36.74013579 +0000 UTC m=+23.804639324 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.740182 4631 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.740222 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:36.740214372 +0000 UTC m=+23.804717906 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.752545 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.771266 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.806857 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.840394 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c91a384b-14a6-429c-a5f3-81f62e36d97d-host\") pod \"node-ca-5fvhl\" (UID: \"c91a384b-14a6-429c-a5f3-81f62e36d97d\") " pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.840426 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c91a384b-14a6-429c-a5f3-81f62e36d97d-serviceca\") pod \"node-ca-5fvhl\" (UID: \"c91a384b-14a6-429c-a5f3-81f62e36d97d\") " pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.840449 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zplv\" (UniqueName: \"kubernetes.io/projected/c91a384b-14a6-429c-a5f3-81f62e36d97d-kube-api-access-4zplv\") pod \"node-ca-5fvhl\" (UID: \"c91a384b-14a6-429c-a5f3-81f62e36d97d\") " pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.840475 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.840568 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.840580 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.840592 4631 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.840645 4631 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:36.840632378 +0000 UTC m=+23.905135892 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.840670 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.840832 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.840864 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.840874 4631 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:34 crc kubenswrapper[4631]: E1129 04:11:34.840928 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:36.840913805 +0000 UTC m=+23.905417319 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.844059 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.886968 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/v
ar/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.918740 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.941637 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zplv\" (UniqueName: \"kubernetes.io/projected/c91a384b-14a6-429c-a5f3-81f62e36d97d-kube-api-access-4zplv\") pod \"node-ca-5fvhl\" (UID: \"c91a384b-14a6-429c-a5f3-81f62e36d97d\") " pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.941714 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c91a384b-14a6-429c-a5f3-81f62e36d97d-host\") pod \"node-ca-5fvhl\" (UID: \"c91a384b-14a6-429c-a5f3-81f62e36d97d\") " pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.941733 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c91a384b-14a6-429c-a5f3-81f62e36d97d-serviceca\") pod \"node-ca-5fvhl\" (UID: \"c91a384b-14a6-429c-a5f3-81f62e36d97d\") " pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.941840 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c91a384b-14a6-429c-a5f3-81f62e36d97d-host\") pod \"node-ca-5fvhl\" (UID: \"c91a384b-14a6-429c-a5f3-81f62e36d97d\") " pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.942505 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c91a384b-14a6-429c-a5f3-81f62e36d97d-serviceca\") pod \"node-ca-5fvhl\" (UID: \"c91a384b-14a6-429c-a5f3-81f62e36d97d\") " pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.958446 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:34 crc kubenswrapper[4631]: I1129 04:11:34.990737 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zplv\" (UniqueName: \"kubernetes.io/projected/c91a384b-14a6-429c-a5f3-81f62e36d97d-kube-api-access-4zplv\") pod \"node-ca-5fvhl\" (UID: \"c91a384b-14a6-429c-a5f3-81f62e36d97d\") " pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.019577 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.044966 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-5fvhl" Nov 29 04:11:35 crc kubenswrapper[4631]: W1129 04:11:35.060549 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc91a384b_14a6_429c_a5f3_81f62e36d97d.slice/crio-a7e2432d131a6204bcc0f1bd081e1917157282088684628a726945a3bf107b81 WatchSource:0}: Error finding container a7e2432d131a6204bcc0f1bd081e1917157282088684628a726945a3bf107b81: Status 404 returned error can't find the container with id a7e2432d131a6204bcc0f1bd081e1917157282088684628a726945a3bf107b81 Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.061641 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.103640 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.140445 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.184774 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.215951 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.215968 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.216007 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:35 crc kubenswrapper[4631]: E1129 04:11:35.216070 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:35 crc kubenswrapper[4631]: E1129 04:11:35.216152 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:35 crc kubenswrapper[4631]: E1129 04:11:35.216214 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.220073 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.220780 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.221212 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac11
7eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.280091 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.300849 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.337788 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.377252 4631 generic.go:334] "Generic (PLEG): container finished" podID="83a25be5-2626-40c4-9f04-e74d576e22d7" containerID="870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc" exitCode=0 Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.377388 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" 
event={"ID":"83a25be5-2626-40c4-9f04-e74d576e22d7","Type":"ContainerDied","Data":"870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc"} Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.381892 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.381940 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.381954 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.381967 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.381977 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.381989 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.384077 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-5fvhl" event={"ID":"c91a384b-14a6-429c-a5f3-81f62e36d97d","Type":"ContainerStarted","Data":"ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0"} Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.384105 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-5fvhl" event={"ID":"c91a384b-14a6-429c-a5f3-81f62e36d97d","Type":"ContainerStarted","Data":"a7e2432d131a6204bcc0f1bd081e1917157282088684628a726945a3bf107b81"} Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.398161 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.419010 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c
0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.464058 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.502195 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.539882 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.579564 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.621707 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/v
ar/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.661412 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.702686 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.743093 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.797727 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z 
is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.825355 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.865619 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.908490 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.948491 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.975666 4631 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.978195 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.978267 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.978287 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.978453 4631 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 29 04:11:35 crc kubenswrapper[4631]: I1129 04:11:35.989731 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.034775 4631 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.035099 4631 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.036246 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.036283 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.036296 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.036314 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.036326 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.059688 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.064311 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.064384 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.064402 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.064428 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.064446 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.065738 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.080203 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.083759 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.083898 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.084010 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.084145 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.084249 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.100196 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.104059 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.104094 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.104103 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.104118 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.104127 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.108914 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z 
is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.123745 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.126932 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.126965 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.126974 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.126986 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.126995 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.140461 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc 
kubenswrapper[4631]: E1129 04:11:36.142774 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.142882 4631 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.144471 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:36
crc kubenswrapper[4631]: I1129 04:11:36.144525 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.144548 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.144576 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.144595 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.183943 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.221870 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.246449 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.246482 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.246503 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.246518 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.246528 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.263651 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.301529 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.342227 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.348482 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.348530 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.348540 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.348554 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.348563 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.382774 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.391158 4631 generic.go:334] "Generic (PLEG): container finished" podID="83a25be5-2626-40c4-9f04-e74d576e22d7" containerID="0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6" exitCode=0
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.391241 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" event={"ID":"83a25be5-2626-40c4-9f04-e74d576e22d7","Type":"ContainerDied","Data":"0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6"}
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.393124 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719"}
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.432521 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.451851 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.451896 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.451909 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.451954 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.451965 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.463149 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.504968 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.542937 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.555855 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.555885 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.555893 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.555907 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.555917 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.584550 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.621219 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.658631 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.658673 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.658684 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.658699 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.658709 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.674151 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z
is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.698662 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.744948 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.760599 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.760725 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.760743 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.760775 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.760786 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.760801 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.760754 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: 
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.760821 4631 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.760812 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.760863 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:11:40.760824427 +0000 UTC m=+27.825327981 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.760880 4631 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.760913 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:40.760897969 +0000 UTC m=+27.825401483 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.760975 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:40.760949 +0000 UTC m=+27.825452544 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.787238 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117
ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.819560 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.861239 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.861360 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.861467 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.861499 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.861498 4631 projected.go:288] Couldn't get 
configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.861525 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.861548 4631 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.861551 4631 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.861601 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:40.861586472 +0000 UTC m=+27.926089996 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:36 crc kubenswrapper[4631]: E1129 04:11:36.861619 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:40.861611523 +0000 UTC m=+27.926115057 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.862786 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.862814 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.862827 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.862844 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.862865 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.865680 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.905889 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.942550 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc
-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.966289 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.966347 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.966361 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.966383 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.966397 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:36Z","lastTransitionTime":"2025-11-29T04:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:36 crc kubenswrapper[4631]: I1129 04:11:36.989998 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.023115 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.061887 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.068882 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.068915 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.068927 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.068948 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.068960 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.171532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.171593 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.171605 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.171628 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.171644 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.216040 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.216093 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.216181 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:37 crc kubenswrapper[4631]: E1129 04:11:37.216376 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:37 crc kubenswrapper[4631]: E1129 04:11:37.216499 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:37 crc kubenswrapper[4631]: E1129 04:11:37.216595 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.274325 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.274386 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.274400 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.274420 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.274433 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.377232 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.377453 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.377533 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.377609 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.377666 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.398797 4631 generic.go:334] "Generic (PLEG): container finished" podID="83a25be5-2626-40c4-9f04-e74d576e22d7" containerID="4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135" exitCode=0 Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.398882 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" event={"ID":"83a25be5-2626-40c4-9f04-e74d576e22d7","Type":"ContainerDied","Data":"4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.409298 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.417359 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.433731 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.446294 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.459766 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\
"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.478910 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z 
is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.479460 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.479483 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.479495 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.479512 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.479525 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.488085 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.501480 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.515189 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.530876 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.543070 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.552869 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.566617 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.579400 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.583776 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.583937 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.584035 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.584143 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.584227 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.625722 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir
\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.687575 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.687651 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.687668 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.687693 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.687711 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.790494 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.790538 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.790550 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.790568 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.790581 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.893924 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.893980 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.893997 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.894021 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.894037 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.997633 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.997678 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.997691 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.997709 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:37 crc kubenswrapper[4631]: I1129 04:11:37.997721 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:37Z","lastTransitionTime":"2025-11-29T04:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.100258 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.100303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.100314 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.100350 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.100362 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:38Z","lastTransitionTime":"2025-11-29T04:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.203018 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.203075 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.203092 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.203117 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.203133 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:38Z","lastTransitionTime":"2025-11-29T04:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.306715 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.306767 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.306779 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.306796 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.306807 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:38Z","lastTransitionTime":"2025-11-29T04:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.410676 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.410733 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.410750 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.410778 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.410795 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:38Z","lastTransitionTime":"2025-11-29T04:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.416155 4631 generic.go:334] "Generic (PLEG): container finished" podID="83a25be5-2626-40c4-9f04-e74d576e22d7" containerID="bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7" exitCode=0 Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.416209 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" event={"ID":"83a25be5-2626-40c4-9f04-e74d576e22d7","Type":"ContainerDied","Data":"bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.437293 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.458544 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.482062 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.496109 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.513493 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.513527 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.513607 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.513627 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.513701 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:38Z","lastTransitionTime":"2025-11-29T04:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.513972 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.529843 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.544632 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.564437 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.580355 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.593637 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.609538 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\
"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.616176 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.616212 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.616224 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.616283 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.616296 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:38Z","lastTransitionTime":"2025-11-29T04:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.627067 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e
2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.638370 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"19
2.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.655218 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:38Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.718420 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.718636 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.718698 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.718716 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.718725 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:38Z","lastTransitionTime":"2025-11-29T04:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.821718 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.821771 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.821784 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.821803 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.821816 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:38Z","lastTransitionTime":"2025-11-29T04:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.924977 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.925026 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.925043 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.925067 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:38 crc kubenswrapper[4631]: I1129 04:11:38.925095 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:38Z","lastTransitionTime":"2025-11-29T04:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.028288 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.028390 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.028412 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.028441 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.028460 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.132057 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.132140 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.132161 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.132195 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.132215 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.216423 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.216470 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.216561 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:39 crc kubenswrapper[4631]: E1129 04:11:39.216620 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:39 crc kubenswrapper[4631]: E1129 04:11:39.216746 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:39 crc kubenswrapper[4631]: E1129 04:11:39.216896 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.234728 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.234778 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.234794 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.234818 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.234836 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.338149 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.338225 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.338311 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.338385 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.338473 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.424817 4631 generic.go:334] "Generic (PLEG): container finished" podID="83a25be5-2626-40c4-9f04-e74d576e22d7" containerID="a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619" exitCode=0 Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.424866 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" event={"ID":"83a25be5-2626-40c4-9f04-e74d576e22d7","Type":"ContainerDied","Data":"a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.442157 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.442207 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.442218 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.442232 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.442244 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.445824 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd48540
8623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.466650 4631 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 
29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.480906 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.498703 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.513756 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.529672 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.544613 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.544657 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.544673 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.544692 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.544708 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.546325 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.566519 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secret
s/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatu
ses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.578951 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.589329 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.606175 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.619199 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.632078 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.646654 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:39Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.647409 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.647436 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.647445 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.647459 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.647470 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.750517 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.750560 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.750577 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.750600 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.750617 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.853184 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.853518 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.853532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.853549 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.853562 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.955990 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.956016 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.956028 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.956043 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:39 crc kubenswrapper[4631]: I1129 04:11:39.956055 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:39Z","lastTransitionTime":"2025-11-29T04:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.058603 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.058647 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.058658 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.058675 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.058686 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.161719 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.161774 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.161792 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.161814 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.161831 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.264593 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.264649 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.264668 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.264692 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.264710 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.368357 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.368402 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.368422 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.368446 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.368462 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.435286 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" event={"ID":"83a25be5-2626-40c4-9f04-e74d576e22d7","Type":"ContainerStarted","Data":"b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.443912 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.444219 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.444386 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.456066 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.470170 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.471364 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.471402 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.471414 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.471431 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.471443 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.472454 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.474832 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.484153 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.495246 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.513253 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z 
is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.524317 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.540255 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.553560 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.567564 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.574006 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.574049 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.574063 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.574079 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.574091 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.584644 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.598685 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.617197 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa7
67a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.631974 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea1772
25c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.643062 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",
\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.658129 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.673708 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.681697 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.681742 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.681756 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.681774 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.681787 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.692861 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.705412 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTi
me\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.721803 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.738974 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.756241 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.771522 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.784945 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.784995 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.785008 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.785026 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.785039 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.787381 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.805691 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.805874 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:11:48.805843862 +0000 UTC m=+35.870347396 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.805954 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.806045 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.806133 4631 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.806182 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:48.80617267 +0000 UTC m=+35.870676194 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.806215 4631 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.806376 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:48.806305734 +0000 UTC m=+35.870809318 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.816591 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metri
cs-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"
mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.836422 4631 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.855144 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[
{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\
\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc
4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.868045 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1
601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.881111 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"ho
stIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:40Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.887007 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.887064 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.887081 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.887105 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.887126 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.906635 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.906917 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.906957 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.906977 4631 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.906997 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.907031 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.907045 4631 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.906754 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.907120 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:48.907060668 +0000 UTC m=+35.971564222 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 29 04:11:40 crc kubenswrapper[4631]: E1129 04:11:40.907151 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:48.90713782 +0000 UTC m=+35.971641374 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.989885 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.989934 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.989951 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.989976 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:40 crc kubenswrapper[4631]: I1129 04:11:40.989995 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:40Z","lastTransitionTime":"2025-11-29T04:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.092737 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.092792 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.092813 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.092837 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.092856 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:41Z","lastTransitionTime":"2025-11-29T04:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.196386 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.196447 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.196465 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.196491 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.196510 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:41Z","lastTransitionTime":"2025-11-29T04:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.219869 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:11:41 crc kubenswrapper[4631]: E1129 04:11:41.220033 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.220560 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:11:41 crc kubenswrapper[4631]: E1129 04:11:41.220662 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.220728 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:11:41 crc kubenswrapper[4631]: E1129 04:11:41.220807 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.299496 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.299559 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.299577 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.299602 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.299620 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:41Z","lastTransitionTime":"2025-11-29T04:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.402573 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.402650 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.402673 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.402698 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.402715 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:41Z","lastTransitionTime":"2025-11-29T04:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.447879 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.505374 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.505415 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.505427 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.505447 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.505459 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:41Z","lastTransitionTime":"2025-11-29T04:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.607685 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.607727 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.607738 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.607753 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.607763 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:41Z","lastTransitionTime":"2025-11-29T04:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.709421 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.709658 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.709740 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.709827 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.709908 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:41Z","lastTransitionTime":"2025-11-29T04:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.812459 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.812678 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.812803 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.812897 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.812988 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:41Z","lastTransitionTime":"2025-11-29T04:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.915995 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.916046 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.916062 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.916083 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:41 crc kubenswrapper[4631]: I1129 04:11:41.916099 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:41Z","lastTransitionTime":"2025-11-29T04:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.017907 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.017982 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.018003 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.018022 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.018072 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.125649 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.125708 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.125732 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.125758 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.125778 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.229045 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.229092 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.229109 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.229132 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.229148 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.332840 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.332900 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.332923 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.332950 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.332971 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.436471 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.436538 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.436561 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.436593 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.436616 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.450856 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.539679 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.539735 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.539748 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.540161 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.540177 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.643297 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.643594 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.643740 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.643884 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.644012 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.746879 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.746939 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.746956 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.746981 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.746999 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.849923 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.850033 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.850055 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.850117 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.850135 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.952497 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.952547 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.952566 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.952589 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:42 crc kubenswrapper[4631]: I1129 04:11:42.952607 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:42Z","lastTransitionTime":"2025-11-29T04:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.055822 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.055922 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.055985 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.056010 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.056062 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.159246 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.159291 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.159307 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.159363 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.159382 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.215603 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.215726 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:43 crc kubenswrapper[4631]: E1129 04:11:43.216158 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.215947 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:43 crc kubenswrapper[4631]: E1129 04:11:43.217085 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:43 crc kubenswrapper[4631]: E1129 04:11:43.217172 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.240868 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.260463 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.262678 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.262740 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.262763 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.262793 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.262815 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.288192 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.304371 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTi
me\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.319482 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.340587 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.360857 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.365641 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.365857 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.366029 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.366258 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.366484 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.382766 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.395835 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.418313 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.437066 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.455305 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/0.log" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.459683 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09" exitCode=1 Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.459751 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.460542 4631 scope.go:117] "RemoveContainer" containerID="c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.468493 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.468879 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.468904 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc 
kubenswrapper[4631]: I1129 04:11:43.468912 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.468947 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.468961 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.487974 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.501901 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.516670 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.533123 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.552692 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.575155 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.575230 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc 
kubenswrapper[4631]: I1129 04:11:43.575254 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.575282 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.575301 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.607248 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.626890 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.647353 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.658862 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.675478 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.677583 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.677619 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.677627 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.677643 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.677652 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.688961 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.705727 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:42Z\\\",\\\"message\\\":\\\" *v1.Pod event handler 6 for removal\\\\nI1129 04:11:41.979555 5856 factory.go:656] Stopping watch factory\\\\nI1129 04:11:41.979561 5856 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:41.979782 5856 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.979864 5856 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:11:41.978980 5856 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:41.980290 5856 handler.go:208] Removed *v1.Node event handler 7\\\\nI1129 04:11:41.979088 5856 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980583 5856 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:41.980601 5856 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:41.980612 5856 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:41.979353 5856 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980730 5856 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:11:41.980751 5856 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.717523 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.732280 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.751024 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.766266 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.780415 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.780474 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.780496 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.780524 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.780542 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.883399 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.883469 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.883492 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.883520 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.883537 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.999028 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.999080 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.999098 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.999124 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:43 crc kubenswrapper[4631]: I1129 04:11:43.999142 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:43Z","lastTransitionTime":"2025-11-29T04:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.101218 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.101245 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.101257 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.101272 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.101284 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:44Z","lastTransitionTime":"2025-11-29T04:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.203756 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.203828 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.203847 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.203878 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.203899 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:44Z","lastTransitionTime":"2025-11-29T04:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.306826 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.306857 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.306868 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.306882 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.306894 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:44Z","lastTransitionTime":"2025-11-29T04:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.409242 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.409312 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.409331 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.409381 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.409398 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:44Z","lastTransitionTime":"2025-11-29T04:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.465388 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/0.log" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.467529 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.467658 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.487604 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9
eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.501322 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.511890 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.511931 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:44 crc 
kubenswrapper[4631]: I1129 04:11:44.511958 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.511976 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.511986 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:44Z","lastTransitionTime":"2025-11-29T04:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.521071 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.532630 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.545578 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.558316 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.572519 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.583293 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.600379 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.614165 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.614282 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.614381 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.614453 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.614507 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:44Z","lastTransitionTime":"2025-11-29T04:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.617191 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.629746 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.641565 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.657235 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\
"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.677683 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039a
efbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:42Z\\\",\\\"message\\\":\\\" *v1.Pod event handler 6 for removal\\\\nI1129 04:11:41.979555 5856 factory.go:656] Stopping watch factory\\\\nI1129 04:11:41.979561 5856 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:41.979782 5856 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.979864 5856 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:11:41.978980 5856 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:41.980290 5856 handler.go:208] Removed *v1.Node event handler 7\\\\nI1129 04:11:41.979088 5856 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980583 5856 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:41.980601 5856 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:41.980612 5856 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:41.979353 5856 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980730 5856 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:11:41.980751 5856 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.716932 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.716986 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.717008 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.717033 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.717050 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:44Z","lastTransitionTime":"2025-11-29T04:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.819783 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.819829 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.819838 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.819853 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.819866 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:44Z","lastTransitionTime":"2025-11-29T04:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.859520 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2"] Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.860390 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.862903 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.864373 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.879204 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.884073 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd983e9b-92e6-41c8-ae19-4f28c141ba51-env-overrides\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.884140 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd983e9b-92e6-41c8-ae19-4f28c141ba51-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.884188 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4vzb\" (UniqueName: \"kubernetes.io/projected/cd983e9b-92e6-41c8-ae19-4f28c141ba51-kube-api-access-h4vzb\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.884239 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd983e9b-92e6-41c8-ae19-4f28c141ba51-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.894242 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.914232 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688
df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\
\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.922242 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.922273 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:44 crc 
kubenswrapper[4631]: I1129 04:11:44.922285 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.922302 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.922316 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:44Z","lastTransitionTime":"2025-11-29T04:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.931535 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.947096 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.963525 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.978259 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.985265 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd983e9b-92e6-41c8-ae19-4f28c141ba51-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.985311 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4vzb\" (UniqueName: \"kubernetes.io/projected/cd983e9b-92e6-41c8-ae19-4f28c141ba51-kube-api-access-h4vzb\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.985371 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd983e9b-92e6-41c8-ae19-4f28c141ba51-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.985419 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd983e9b-92e6-41c8-ae19-4f28c141ba51-env-overrides\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.986086 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cd983e9b-92e6-41c8-ae19-4f28c141ba51-env-overrides\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.986279 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cd983e9b-92e6-41c8-ae19-4f28c141ba51-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.993990 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cd983e9b-92e6-41c8-ae19-4f28c141ba51-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:44 crc kubenswrapper[4631]: I1129 04:11:44.996516 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},
{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:44Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.013505 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:45Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.017702 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4vzb\" (UniqueName: \"kubernetes.io/projected/cd983e9b-92e6-41c8-ae19-4f28c141ba51-kube-api-access-h4vzb\") pod \"ovnkube-control-plane-749d76644c-77lq2\" (UID: \"cd983e9b-92e6-41c8-ae19-4f28c141ba51\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.024619 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.024671 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.024691 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.024715 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.024732 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.032996 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:45Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.052833 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:45Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.067270 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:45Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.084676 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\
"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:45Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.108674 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039a
efbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:42Z\\\",\\\"message\\\":\\\" *v1.Pod event handler 6 for removal\\\\nI1129 04:11:41.979555 5856 factory.go:656] Stopping watch factory\\\\nI1129 04:11:41.979561 5856 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:41.979782 5856 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.979864 5856 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:11:41.978980 5856 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:41.980290 5856 handler.go:208] Removed *v1.Node event handler 7\\\\nI1129 04:11:41.979088 5856 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980583 5856 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:41.980601 5856 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:41.980612 5856 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:41.979353 5856 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980730 5856 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:11:41.980751 5856 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:45Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.124510 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:45Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.127458 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.127515 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.127533 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.127559 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.127575 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.173665 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.215660 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:11:45 crc kubenswrapper[4631]: E1129 04:11:45.215785 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.215935 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:11:45 crc kubenswrapper[4631]: E1129 04:11:45.216240 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.215681 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:11:45 crc kubenswrapper[4631]: E1129 04:11:45.216619 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.231201 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.231258 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.231276 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.231299 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.231316 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.333981 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.334038 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.334060 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.334085 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.334101 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.437673 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.437720 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.437731 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.437752 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.437765 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.472886 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" event={"ID":"cd983e9b-92e6-41c8-ae19-4f28c141ba51","Type":"ContainerStarted","Data":"f80a518aa9757e228c7f3c1c807556debc63f7099358be04a314b288c9d692c9"}
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.540138 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.540216 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.540238 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.540263 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.540281 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.643863 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.643927 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.643945 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.643971 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.643990 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.746311 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.746374 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.746387 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.746405 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.746417 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.849262 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.849298 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.849308 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.849324 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.849366 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.953124 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.953191 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.953217 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.953245 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:45 crc kubenswrapper[4631]: I1129 04:11:45.953264 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:45Z","lastTransitionTime":"2025-11-29T04:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.056074 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.056111 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.056126 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.056148 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.056165 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.156711 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.156758 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.156770 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.156792 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.156805 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: E1129 04:11:46.172888 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.177280 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.177313 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.177328 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.177362 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.177376 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: E1129 04:11:46.196688 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.200723 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.200777 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.200794 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.200819 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.200836 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: E1129 04:11:46.267194 4631 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.269388 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.269433 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.269460 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.269483 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.269499 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.372690 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.372757 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.372775 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.372800 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.372819 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.476297 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.476406 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.476435 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.476467 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.476490 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.486527 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/1.log" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.487793 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/0.log" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.492656 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092" exitCode=1 Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.492710 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.492817 4631 scope.go:117] "RemoveContainer" containerID="c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.495085 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" event={"ID":"cd983e9b-92e6-41c8-ae19-4f28c141ba51","Type":"ContainerStarted","Data":"d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.495199 4631 scope.go:117] "RemoveContainer" containerID="26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092" Nov 29 04:11:46 crc kubenswrapper[4631]: E1129 04:11:46.495611 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.517952 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.535908 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.558121 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.579428 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.579478 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.579497 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.579521 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.579538 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.583258 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.605646 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.613200 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-b6vgh"] Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.613989 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:46 crc kubenswrapper[4631]: E1129 04:11:46.614043 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.622947 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.643069 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.671458 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:42Z\\\",\\\"message\\\":\\\" *v1.Pod event handler 6 for removal\\\\nI1129 04:11:41.979555 5856 factory.go:656] Stopping watch factory\\\\nI1129 04:11:41.979561 5856 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:41.979782 5856 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.979864 5856 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:11:41.978980 5856 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:41.980290 5856 handler.go:208] Removed *v1.Node event handler 7\\\\nI1129 04:11:41.979088 5856 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980583 5856 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:41.980601 5856 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:41.980612 5856 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:41.979353 5856 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980730 5856 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:11:41.980751 5856 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 
04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.683631 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.683696 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.683721 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.683751 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.683776 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.683637 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.702283 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.721178 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.736952 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.760069 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.776192 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.786270 4631 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.786309 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.786319 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.786364 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.786386 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.791922 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.803026 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bk256\" (UniqueName: \"kubernetes.io/projected/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-kube-api-access-bk256\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.803071 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.809289 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":
{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.832654 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.850299 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.874986 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.888526 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.888686 4631 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.888702 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.888709 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.888722 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.888730 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.897555 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.903503 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bk256\" (UniqueName: \"kubernetes.io/projected/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-kube-api-access-bk256\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.903542 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:46 crc kubenswrapper[4631]: E1129 04:11:46.903654 4631 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:11:46 crc kubenswrapper[4631]: E1129 04:11:46.903703 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs podName:c6c5bb91-f03c-4672-bc61-69a68b8c89d6 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:47.40368903 +0000 UTC m=+34.468192544 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs") pod "network-metrics-daemon-b6vgh" (UID: "c6c5bb91-f03c-4672-bc61-69a68b8c89d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.909403 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.920158 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.922862 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bk256\" (UniqueName: \"kubernetes.io/projected/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-kube-api-access-bk256\") pod \"network-metrics-daemon-b6vgh\" (UID: 
\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.932972 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],
\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.944244 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.958183 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.970156 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.981464 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.991511 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.991562 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.991578 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.991608 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.991625 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:46Z","lastTransitionTime":"2025-11-29T04:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:46 crc kubenswrapper[4631]: I1129 04:11:46.998403 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:46Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.019731 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:42Z\\\",\\\"message\\\":\\\" *v1.Pod event handler 6 for removal\\\\nI1129 04:11:41.979555 5856 factory.go:656] Stopping watch factory\\\\nI1129 04:11:41.979561 5856 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:41.979782 5856 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.979864 5856 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:11:41.978980 5856 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:41.980290 5856 handler.go:208] Removed *v1.Node event handler 7\\\\nI1129 04:11:41.979088 5856 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980583 5856 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:41.980601 5856 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:41.980612 5856 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:41.979353 5856 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980730 5856 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:11:41.980751 5856 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 
04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.028582 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.094735 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.094809 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.094834 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.094863 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.094885 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:47Z","lastTransitionTime":"2025-11-29T04:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.198426 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.198489 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.198506 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.198531 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.198550 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:47Z","lastTransitionTime":"2025-11-29T04:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.216273 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.216324 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.216368 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:47 crc kubenswrapper[4631]: E1129 04:11:47.216512 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:47 crc kubenswrapper[4631]: E1129 04:11:47.216608 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:47 crc kubenswrapper[4631]: E1129 04:11:47.216767 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.301840 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.301890 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.301910 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.301931 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.301948 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:47Z","lastTransitionTime":"2025-11-29T04:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.404523 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.404567 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.404583 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.404604 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.404620 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:47Z","lastTransitionTime":"2025-11-29T04:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.410826 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:47 crc kubenswrapper[4631]: E1129 04:11:47.411066 4631 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:11:47 crc kubenswrapper[4631]: E1129 04:11:47.411199 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs podName:c6c5bb91-f03c-4672-bc61-69a68b8c89d6 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:48.411173364 +0000 UTC m=+35.475676908 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs") pod "network-metrics-daemon-b6vgh" (UID: "c6c5bb91-f03c-4672-bc61-69a68b8c89d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.502137 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/1.log" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.508226 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.508278 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.508303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.508372 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.508398 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:47Z","lastTransitionTime":"2025-11-29T04:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.509420 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" event={"ID":"cd983e9b-92e6-41c8-ae19-4f28c141ba51","Type":"ContainerStarted","Data":"a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f"} Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.530043 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.548969 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e
9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.564936 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.588467 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.604509 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.611397 4631 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.611475 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.611501 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.611533 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.611558 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:47Z","lastTransitionTime":"2025-11-29T04:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.620243 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.640161 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.657651 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.676238 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.696915 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.714929 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.715130 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.715274 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.715443 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.715567 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:47Z","lastTransitionTime":"2025-11-29T04:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.717589 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.738039 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.755975 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.775691 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:42Z\\\",\\\"message\\\":\\\" *v1.Pod event handler 6 for removal\\\\nI1129 04:11:41.979555 5856 factory.go:656] Stopping watch factory\\\\nI1129 04:11:41.979561 5856 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:41.979782 5856 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.979864 5856 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:11:41.978980 5856 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:41.980290 5856 handler.go:208] Removed *v1.Node event handler 7\\\\nI1129 04:11:41.979088 5856 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980583 5856 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:41.980601 5856 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:41.980612 5856 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:41.979353 5856 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980730 5856 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:11:41.980751 5856 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 
04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.787321 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.804653 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:47Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.817904 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.817946 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.817962 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.817986 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.818004 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:47Z","lastTransitionTime":"2025-11-29T04:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.920555 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.920594 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.920606 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.920622 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:47 crc kubenswrapper[4631]: I1129 04:11:47.920634 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:47Z","lastTransitionTime":"2025-11-29T04:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.023377 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.023436 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.023460 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.023487 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.023509 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.125967 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.126031 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.126048 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.126075 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.126092 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.216272 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.216501 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.227981 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.228045 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.228063 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.228089 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.228107 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.331467 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.331544 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.331564 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.331590 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.331608 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.419268 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.419507 4631 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.419611 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs podName:c6c5bb91-f03c-4672-bc61-69a68b8c89d6 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:50.419585051 +0000 UTC m=+37.484088595 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs") pod "network-metrics-daemon-b6vgh" (UID: "c6c5bb91-f03c-4672-bc61-69a68b8c89d6") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.434413 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.434472 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.434491 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.434515 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.434532 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.537680 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.537731 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.537748 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.537775 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.537794 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.573213 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.596757 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.616840 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.634174 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.640148 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.640205 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.640223 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.640246 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.640262 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.652769 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.671446 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.691808 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.711999 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.735041 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.743124 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.743228 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.743245 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.743269 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.743286 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.759176 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.781366 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.801757 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z"
Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.824040 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.824184 4631 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:12:04.824161907 +0000 UTC m=+51.888665431 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.824799 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.824955 4631 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.825112 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:04.825045669 +0000 UTC m=+51.889549223 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.825165 4631 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.825213 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:04.825200483 +0000 UTC m=+51.889704007 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.825477 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.834531 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039a
efbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:42Z\\\",\\\"message\\\":\\\" *v1.Pod event handler 6 for removal\\\\nI1129 04:11:41.979555 5856 factory.go:656] Stopping watch factory\\\\nI1129 04:11:41.979561 5856 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:41.979782 5856 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.979864 5856 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:11:41.978980 5856 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:41.980290 5856 handler.go:208] Removed *v1.Node event handler 7\\\\nI1129 04:11:41.979088 5856 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980583 5856 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:41.980601 5856 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:41.980612 5856 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:41.979353 5856 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980730 5856 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:11:41.980751 5856 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy 
event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d
2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.845371 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.845644 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.845801 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.845977 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.846167 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.848977 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.869105 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.886203 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e
9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.903198 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:48Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.926143 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.926255 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.926324 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.926391 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.926408 4631 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.926489 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:04.92646601 +0000 UTC m=+51.990969574 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.926526 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.926585 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.926605 4631 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:48 crc kubenswrapper[4631]: E1129 04:11:48.926693 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:04.926668605 +0000 UTC m=+51.991172159 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.949652 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.949715 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.949732 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.949757 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:48 crc kubenswrapper[4631]: I1129 04:11:48.949774 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:48Z","lastTransitionTime":"2025-11-29T04:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.053513 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.053569 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.053586 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.053613 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.053631 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.156543 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.156608 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.156625 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.156651 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.156669 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.216020 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.216143 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.216227 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:49 crc kubenswrapper[4631]: E1129 04:11:49.216463 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:49 crc kubenswrapper[4631]: E1129 04:11:49.216662 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:49 crc kubenswrapper[4631]: E1129 04:11:49.216843 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.260074 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.260123 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.260140 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.260164 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.260184 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.363770 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.363837 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.363853 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.363877 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.363893 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.467107 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.467182 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.467208 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.467238 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.467260 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.570644 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.570727 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.570745 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.571151 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.571201 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.674707 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.674765 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.674782 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.674809 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.674828 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.777697 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.777760 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.777778 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.777806 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.777824 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.881024 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.881096 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.881115 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.881141 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.881160 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.984847 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.985016 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.985038 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.985151 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:49 crc kubenswrapper[4631]: I1129 04:11:49.985256 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:49Z","lastTransitionTime":"2025-11-29T04:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.088902 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.088962 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.088978 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.089008 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.089026 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:50Z","lastTransitionTime":"2025-11-29T04:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.192037 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.192093 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.192110 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.192160 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.192178 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:50Z","lastTransitionTime":"2025-11-29T04:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.216491 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:50 crc kubenswrapper[4631]: E1129 04:11:50.216708 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.295783 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.295864 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.295881 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.295906 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.295954 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:50Z","lastTransitionTime":"2025-11-29T04:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.399268 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.399321 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.399357 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.399379 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.399396 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:50Z","lastTransitionTime":"2025-11-29T04:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.443772 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:50 crc kubenswrapper[4631]: E1129 04:11:50.443938 4631 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:11:50 crc kubenswrapper[4631]: E1129 04:11:50.443994 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs podName:c6c5bb91-f03c-4672-bc61-69a68b8c89d6 nodeName:}" failed. No retries permitted until 2025-11-29 04:11:54.443975289 +0000 UTC m=+41.508478813 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs") pod "network-metrics-daemon-b6vgh" (UID: "c6c5bb91-f03c-4672-bc61-69a68b8c89d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.501437 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.501505 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.501524 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.501551 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.501572 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:50Z","lastTransitionTime":"2025-11-29T04:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.604136 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.604256 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.604275 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.604301 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.604319 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:50Z","lastTransitionTime":"2025-11-29T04:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.707974 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.708013 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.708021 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.708035 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.708045 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:50Z","lastTransitionTime":"2025-11-29T04:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.810667 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.811017 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.811169 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.811326 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.811511 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:50Z","lastTransitionTime":"2025-11-29T04:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.914217 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.914279 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.914298 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.914323 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:50 crc kubenswrapper[4631]: I1129 04:11:50.914372 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:50Z","lastTransitionTime":"2025-11-29T04:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.017882 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.017946 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.017963 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.017987 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.018004 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.120629 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.120704 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.120726 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.120753 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.120772 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.216246 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.216412 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:51 crc kubenswrapper[4631]: E1129 04:11:51.216513 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.216557 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:51 crc kubenswrapper[4631]: E1129 04:11:51.216631 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:51 crc kubenswrapper[4631]: E1129 04:11:51.216740 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.222814 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.222858 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.222877 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.222899 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.222916 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.326273 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.326357 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.326374 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.326403 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.326420 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.428892 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.428959 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.428983 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.429013 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.429036 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.532754 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.532830 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.532853 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.532883 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.532904 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.636812 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.636875 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.636892 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.636918 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.636935 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.740295 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.740372 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.740390 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.740412 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.740428 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.843754 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.843809 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.843826 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.843848 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.843866 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.947301 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.948121 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.948161 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.948211 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:51 crc kubenswrapper[4631]: I1129 04:11:51.948231 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:51Z","lastTransitionTime":"2025-11-29T04:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.050894 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.050947 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.050964 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.050987 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.051005 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.154054 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.154435 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.154602 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.154751 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.154936 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.216062 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:52 crc kubenswrapper[4631]: E1129 04:11:52.216539 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.258057 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.258127 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.258146 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.258171 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.258190 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.361717 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.361809 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.361870 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.361897 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.361915 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.465132 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.465181 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.465240 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.465260 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.465272 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.568205 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.568255 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.568268 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.568287 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.568303 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.670880 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.670940 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.670957 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.670980 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.670996 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.773856 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.773928 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.773951 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.773984 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.774007 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.877609 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.877701 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.877720 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.877744 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.877763 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.980866 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.980903 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.980911 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.980925 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:52 crc kubenswrapper[4631]: I1129 04:11:52.980935 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:52Z","lastTransitionTime":"2025-11-29T04:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.083535 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.083577 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.083592 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.083610 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.083622 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:53Z","lastTransitionTime":"2025-11-29T04:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.186685 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.186745 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.186762 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.186786 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.186803 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:53Z","lastTransitionTime":"2025-11-29T04:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.216017 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.216142 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.216492 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:53 crc kubenswrapper[4631]: E1129 04:11:53.216444 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:53 crc kubenswrapper[4631]: E1129 04:11:53.216651 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:53 crc kubenswrapper[4631]: E1129 04:11:53.216767 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.237861 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b14472353
88416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubern
etes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[
{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.252167 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.274983 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.288642 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.288707 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.288726 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.288752 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.288770 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:53Z","lastTransitionTime":"2025-11-29T04:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.298229 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"moun
tPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.318381 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.336243 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.370919 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c64ac8b52e7eea41fc2b19379909be68beb51557c0917f9a137f83273f6c8f09\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:42Z\\\",\\\"message\\\":\\\" *v1.Pod event handler 6 for removal\\\\nI1129 04:11:41.979555 5856 factory.go:656] Stopping watch factory\\\\nI1129 04:11:41.979561 5856 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:41.979782 5856 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.979864 5856 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:11:41.978980 5856 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:41.980290 5856 handler.go:208] Removed *v1.Node event handler 7\\\\nI1129 04:11:41.979088 5856 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980583 5856 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:41.980601 5856 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:41.980612 5856 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:41.979353 5856 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:41.980730 5856 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:11:41.980751 5856 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 
04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.388403 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.391740 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.391822 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.391849 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.391880 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.391904 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:53Z","lastTransitionTime":"2025-11-29T04:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.411289 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.441020 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.464636 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.493641 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.494527 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.494564 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.494573 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.494587 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.494611 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:53Z","lastTransitionTime":"2025-11-29T04:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.508657 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.522089 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.533506 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e
9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.543188 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.597566 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.597606 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.597615 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.597630 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.597639 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:53Z","lastTransitionTime":"2025-11-29T04:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.700797 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.700848 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.700864 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.700888 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.700905 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:53Z","lastTransitionTime":"2025-11-29T04:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.803805 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.803857 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.803874 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.803900 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.803917 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:53Z","lastTransitionTime":"2025-11-29T04:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.906780 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.907106 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.907296 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.907538 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:53 crc kubenswrapper[4631]: I1129 04:11:53.907741 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:53Z","lastTransitionTime":"2025-11-29T04:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.011287 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.011380 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.011410 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.011439 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.011457 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.114851 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.114913 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.114932 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.114961 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.114980 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.215554 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:54 crc kubenswrapper[4631]: E1129 04:11:54.216233 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.217406 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.217453 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.217476 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.217502 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.217519 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.320371 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.320438 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.320462 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.320490 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.320511 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.424307 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.424411 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.424439 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.424468 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.424487 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.492394 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:54 crc kubenswrapper[4631]: E1129 04:11:54.492588 4631 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:11:54 crc kubenswrapper[4631]: E1129 04:11:54.492690 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs podName:c6c5bb91-f03c-4672-bc61-69a68b8c89d6 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:02.492663944 +0000 UTC m=+49.557167498 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs") pod "network-metrics-daemon-b6vgh" (UID: "c6c5bb91-f03c-4672-bc61-69a68b8c89d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.526908 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.526962 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.526973 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.526989 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.527001 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.629822 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.629886 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.629903 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.629927 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.629945 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.732662 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.732760 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.732779 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.732801 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.732817 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.835705 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.835761 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.835783 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.835807 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.835824 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.839902 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.840974 4631 scope.go:117] "RemoveContainer" containerID="26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092" Nov 29 04:11:54 crc kubenswrapper[4631]: E1129 04:11:54.841324 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.862557 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:54Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.880931 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:54Z is after 2025-08-24T17:21:41Z" Nov 29 
04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.897966 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:54Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.921998 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:54Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.939054 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.939165 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:54 crc 
kubenswrapper[4631]: I1129 04:11:54.939196 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.939273 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.939301 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:54Z","lastTransitionTime":"2025-11-29T04:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.940631 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:54Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.956097 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:54Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:54 crc kubenswrapper[4631]: I1129 04:11:54.977240 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"1
92.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:54Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.001298 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:54Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.021019 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:55Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.042103 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.042153 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.042170 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.042195 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.042214 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:55Z","lastTransitionTime":"2025-11-29T04:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.057033 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039a
efbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:55Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.078968 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:55Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.100372 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:55Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.126305 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:55Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.145419 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.145510 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.145535 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:55 crc 
kubenswrapper[4631]: I1129 04:11:55.145565 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.145589 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:55Z","lastTransitionTime":"2025-11-29T04:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.148933 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:55Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.169756 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:55Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.190163 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:55Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.215772 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.215792 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.215866 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:55 crc kubenswrapper[4631]: E1129 04:11:55.216744 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:55 crc kubenswrapper[4631]: E1129 04:11:55.216895 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:55 crc kubenswrapper[4631]: E1129 04:11:55.217122 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.248503 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.248559 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.248577 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.248602 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.248618 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:55Z","lastTransitionTime":"2025-11-29T04:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.351997 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.352058 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.352076 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.352100 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.352118 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:55Z","lastTransitionTime":"2025-11-29T04:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.455469 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.455532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.455595 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.455622 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.455641 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:55Z","lastTransitionTime":"2025-11-29T04:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.558371 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.558431 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.558450 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.558480 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.558500 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:55Z","lastTransitionTime":"2025-11-29T04:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.661690 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.661767 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.661791 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.661817 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:55 crc kubenswrapper[4631]: I1129 04:11:55.661839 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:55Z","lastTransitionTime":"2025-11-29T04:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.216229 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:11:56 crc kubenswrapper[4631]: E1129 04:11:56.216522 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6"
Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.334425 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.334493 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.334512 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.334538 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.334558 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
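The repeated KubeletNotReady condition above is the runtime reporting NetworkReady=false because /etc/kubernetes/cni/net.d/ holds no CNI configuration file, which in turn blocks sandbox creation for pods such as network-metrics-daemon-b6vgh. The following is a minimal illustrative sketch, not the kubelet's actual code, of the same directory scan; the .conf/.conflist/.json extensions are an assumption based on what CNI config loaders conventionally accept.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// confDir is the directory the kubelet log above complains about.
const confDir = "/etc/kubernetes/cni/net.d"

func main() {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	found := 0
	for _, e := range entries {
		// Assumed convention: CNI config loaders pick up .conf, .conflist and .json files.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config:", filepath.Join(confDir, e.Name()))
			found++
		}
	}
	if found == 0 {
		// This is the state the log reports: NetworkReady=false, NetworkPluginNotReady.
		fmt.Println("no CNI configuration file in", confDir, "- network plugin not ready")
	}
}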
Nov 29 04:11:56 crc kubenswrapper[4631]: E1129 04:11:56.356658 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:56Z is after 2025-08-24T17:21:41Z"
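The status patch itself is well-formed; it is rejected because the node.network-node-identity.openshift.io validating webhook at 127.0.0.1:9743 serves a certificate that expired on 2025-08-24, long before the current clock of 2025-11-29. A sketch of one way to confirm the NotAfter date that the x509 error reports, using only the address taken from the log; verification is skipped deliberately so the expired certificate can be read rather than trusted.

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Address taken from the failed webhook call in the log above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		// Skip verification on purpose: we want to inspect the expired
		// certificate, not trust it.
		InsecureSkipVerify: true,
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("subject: ", cert.Subject)
	fmt.Println("notAfter:", cert.NotAfter)
	if time.Now().After(cert.NotAfter) {
		// Matches the log: "certificate has expired ... current time
		// 2025-11-29T04:11:56Z is after 2025-08-24T17:21:41Z".
		fmt.Println("certificate has expired")
	}
}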
event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.362007 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.362030 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.362046 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:56 crc kubenswrapper[4631]: E1129 04:11:56.382106 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:56Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.387801 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.387871 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.387891 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.387919 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.387937 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:56 crc kubenswrapper[4631]: E1129 04:11:56.407394 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:56Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.412958 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.413025 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.413044 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.413069 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.413086 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:56 crc kubenswrapper[4631]: E1129 04:11:56.434030 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:56Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.439424 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.439474 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.439491 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.439512 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.439528 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:56 crc kubenswrapper[4631]: E1129 04:11:56.460205 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:11:56Z is after 2025-08-24T17:21:41Z" Nov 29 04:11:56 crc kubenswrapper[4631]: E1129 04:11:56.460457 4631 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.462514 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.462563 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.462581 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.462605 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.462622 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.565236 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.565304 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.565322 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.565369 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.565388 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.667962 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.668022 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.668039 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.668061 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.668077 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.771234 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.771298 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.771315 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.771361 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.771379 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.874852 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.874922 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.874970 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.875000 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.875024 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.977705 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.977769 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.977786 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.977815 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:56 crc kubenswrapper[4631]: I1129 04:11:56.977832 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:56Z","lastTransitionTime":"2025-11-29T04:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.081357 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.081416 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.081433 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.081458 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.081477 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:57Z","lastTransitionTime":"2025-11-29T04:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.184455 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.184515 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.184532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.184558 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.184575 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:57Z","lastTransitionTime":"2025-11-29T04:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.216044 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:57 crc kubenswrapper[4631]: E1129 04:11:57.216232 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.216568 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:57 crc kubenswrapper[4631]: E1129 04:11:57.216696 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.216787 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:57 crc kubenswrapper[4631]: E1129 04:11:57.216979 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.287717 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.288642 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.288778 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.288906 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.289022 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:57Z","lastTransitionTime":"2025-11-29T04:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.392632 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.392965 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.393109 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.393309 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.393504 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:57Z","lastTransitionTime":"2025-11-29T04:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.496785 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.496886 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.496906 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.496969 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.496989 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:57Z","lastTransitionTime":"2025-11-29T04:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.600113 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.600591 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.600757 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.600912 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.601055 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:57Z","lastTransitionTime":"2025-11-29T04:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.704129 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.704191 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.704208 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.704232 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.704249 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:57Z","lastTransitionTime":"2025-11-29T04:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.806675 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.806716 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.806728 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.806744 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.806756 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:57Z","lastTransitionTime":"2025-11-29T04:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.909798 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.909831 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.909842 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.909856 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:57 crc kubenswrapper[4631]: I1129 04:11:57.909866 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:57Z","lastTransitionTime":"2025-11-29T04:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.013232 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.013279 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.013292 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.013312 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.013326 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.115417 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.115454 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.115489 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.115505 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.115514 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.215492 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:11:58 crc kubenswrapper[4631]: E1129 04:11:58.215691 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.218061 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.218142 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.218167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.218200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.218225 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.320987 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.321143 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.321167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.321196 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.321213 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.423851 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.423931 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.423951 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.423983 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.424002 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.526593 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.526658 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.526682 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.526714 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.526735 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.630199 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.630272 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.630294 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.630323 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.630379 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.733024 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.733097 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.733123 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.733149 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.733167 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.836708 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.836760 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.836777 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.836804 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.836824 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.939393 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.939452 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.939481 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.939511 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:58 crc kubenswrapper[4631]: I1129 04:11:58.939535 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:58Z","lastTransitionTime":"2025-11-29T04:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.042941 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.042992 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.043009 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.043036 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.043055 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.145934 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.145985 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.146001 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.146028 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.146045 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.235002 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:11:59 crc kubenswrapper[4631]: E1129 04:11:59.235173 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.236025 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:11:59 crc kubenswrapper[4631]: E1129 04:11:59.236300 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.240896 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:11:59 crc kubenswrapper[4631]: E1129 04:11:59.241835 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.248709 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.248749 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.248765 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.248787 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.248816 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.353612 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.354035 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.354053 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.354079 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.354098 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.456751 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.456792 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.456804 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.456824 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.456870 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.559231 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.559285 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.559301 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.559324 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.559377 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.662478 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.662514 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.662522 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.662535 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.662543 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.765430 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.765489 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.765508 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.765530 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.765547 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.868398 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.868471 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.868483 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.868503 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.868541 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.971168 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.971228 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.971247 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.971271 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:11:59 crc kubenswrapper[4631]: I1129 04:11:59.971288 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:11:59Z","lastTransitionTime":"2025-11-29T04:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.074773 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.074827 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.074844 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.074868 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.074884 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:00Z","lastTransitionTime":"2025-11-29T04:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.178156 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.178212 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.178234 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.178259 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.178276 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:00Z","lastTransitionTime":"2025-11-29T04:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.216434 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:00 crc kubenswrapper[4631]: E1129 04:12:00.216613 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.281764 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.281823 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.281841 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.281866 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.281888 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:00Z","lastTransitionTime":"2025-11-29T04:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.385393 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.385444 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.385460 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.385501 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.385522 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:00Z","lastTransitionTime":"2025-11-29T04:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.489429 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.489493 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.489510 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.489535 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.489552 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:00Z","lastTransitionTime":"2025-11-29T04:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.591853 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.591925 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.591943 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.591967 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.591985 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:00Z","lastTransitionTime":"2025-11-29T04:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.695456 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.695519 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.695536 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.695559 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.695575 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:00Z","lastTransitionTime":"2025-11-29T04:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.798275 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.798372 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.798392 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.798415 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.798437 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:00Z","lastTransitionTime":"2025-11-29T04:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.901434 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.901496 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.901513 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.901538 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:00 crc kubenswrapper[4631]: I1129 04:12:00.901556 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:00Z","lastTransitionTime":"2025-11-29T04:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.005028 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.005090 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.005111 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.005138 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.005156 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.107731 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.107799 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.107823 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.107853 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.107878 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.210259 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.210289 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.210299 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.210317 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.210354 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.216557 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.216644 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.216583 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:01 crc kubenswrapper[4631]: E1129 04:12:01.216707 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:01 crc kubenswrapper[4631]: E1129 04:12:01.216768 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:01 crc kubenswrapper[4631]: E1129 04:12:01.216847 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.313182 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.313231 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.313246 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.313270 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.313298 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.415909 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.415976 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.416001 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.416027 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.416043 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.519847 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.519909 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.519931 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.519960 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.519982 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.623901 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.624395 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.624578 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.624728 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.624878 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.733112 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.733168 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.733185 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.733208 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.733224 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.836504 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.836633 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.836656 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.836685 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.836704 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.939873 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.939948 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.939975 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.940005 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:01 crc kubenswrapper[4631]: I1129 04:12:01.940028 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:01Z","lastTransitionTime":"2025-11-29T04:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.043554 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.043616 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.043637 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.043662 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.043679 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.146152 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.146199 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.146218 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.146242 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.146259 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.215639 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:02 crc kubenswrapper[4631]: E1129 04:12:02.215825 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.249381 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.249456 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.249474 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.249497 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.249514 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.351919 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.351975 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.351991 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.352019 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.352036 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.455141 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.455475 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.455617 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.455762 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.455881 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.558055 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.558103 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.558120 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.558144 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.558164 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.595720 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:02 crc kubenswrapper[4631]: E1129 04:12:02.595890 4631 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:12:02 crc kubenswrapper[4631]: E1129 04:12:02.596061 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs podName:c6c5bb91-f03c-4672-bc61-69a68b8c89d6 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:18.596039663 +0000 UTC m=+65.660543207 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs") pod "network-metrics-daemon-b6vgh" (UID: "c6c5bb91-f03c-4672-bc61-69a68b8c89d6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.661433 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.661484 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.661500 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.661524 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.661541 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.763726 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.763812 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.763830 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.763852 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.763869 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.866440 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.866496 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.866517 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.866548 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.866568 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.969395 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.969465 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.969490 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.969522 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:02 crc kubenswrapper[4631]: I1129 04:12:02.969544 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:02Z","lastTransitionTime":"2025-11-29T04:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.072296 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.072370 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.072387 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.072410 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.072428 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:03Z","lastTransitionTime":"2025-11-29T04:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.175460 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.175500 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.175517 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.175540 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.175557 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:03Z","lastTransitionTime":"2025-11-29T04:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.215534 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.215605 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:03 crc kubenswrapper[4631]: E1129 04:12:03.215765 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.215800 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:03 crc kubenswrapper[4631]: E1129 04:12:03.216026 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:03 crc kubenswrapper[4631]: E1129 04:12:03.216267 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.241872 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\
\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.261261 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\
\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.278138 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.278212 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.278320 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.278391 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.278462 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:03Z","lastTransitionTime":"2025-11-29T04:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.285584 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.307421 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.327193 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.345058 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.368509 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\
\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 
cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.381402 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.381457 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.381469 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.381485 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.381497 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:03Z","lastTransitionTime":"2025-11-29T04:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.395626 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.416685 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.436562 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.467606 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.483178 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.484067 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.484140 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.484154 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.484172 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.484183 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:03Z","lastTransitionTime":"2025-11-29T04:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.499796 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.518651 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.534271 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 
04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.548905 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.587389 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.587476 4631 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.587538 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.587572 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.587590 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:03Z","lastTransitionTime":"2025-11-29T04:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.690474 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.690534 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.690552 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.690580 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.690597 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:03Z","lastTransitionTime":"2025-11-29T04:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.793324 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.793664 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.793788 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.793916 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.794034 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:03Z","lastTransitionTime":"2025-11-29T04:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.898013 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.898088 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.898105 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.898133 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:03 crc kubenswrapper[4631]: I1129 04:12:03.898160 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:03Z","lastTransitionTime":"2025-11-29T04:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.000717 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.000795 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.000813 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.000837 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.000854 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.104361 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.104435 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.104455 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.104488 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.104505 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.207698 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.207756 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.207773 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.207796 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.207813 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.216378 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:04 crc kubenswrapper[4631]: E1129 04:12:04.216697 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.310749 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.311123 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.311248 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.311416 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.311563 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.414592 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.414633 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.414646 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.414663 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.414676 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.517639 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.517692 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.517710 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.517734 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.517793 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.620060 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.620106 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.620119 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.620137 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.620151 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.722403 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.722459 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.722475 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.722498 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.722514 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.825139 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.825190 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.825206 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.825231 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.825262 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.916087 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:12:04 crc kubenswrapper[4631]: E1129 04:12:04.916286 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:12:36.916250806 +0000 UTC m=+83.980754360 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.916441 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.916500 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:04 crc kubenswrapper[4631]: E1129 04:12:04.916574 4631 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:12:04 crc kubenswrapper[4631]: E1129 04:12:04.916673 4631 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:12:04 crc kubenswrapper[4631]: E1129 04:12:04.916694 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:36.916646545 +0000 UTC m=+83.981150099 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:12:04 crc kubenswrapper[4631]: E1129 04:12:04.916726 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:36.916712077 +0000 UTC m=+83.981215631 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.934795 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.934862 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.934886 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.934916 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:04 crc kubenswrapper[4631]: I1129 04:12:04.934938 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:04Z","lastTransitionTime":"2025-11-29T04:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.017520 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.017892 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.017958 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.017989 4631 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.018054 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.018104 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:37.018070316 +0000 UTC m=+84.082573900 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.018118 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.018147 4631 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.018284 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:37.01822944 +0000 UTC m=+84.082732984 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.018319 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.037441 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.037994 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.038040 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.038104 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.038130 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.038150 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.056833 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.061899 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/ku
bernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\
\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.079306 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.094438 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.152814 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.152852 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.152864 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:05 crc 
kubenswrapper[4631]: I1129 04:12:05.152880 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.152891 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.168388 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.183461 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.197464 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.213468 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.215732 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.215885 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.216083 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.216291 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.216721 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:05 crc kubenswrapper[4631]: E1129 04:12:05.218317 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.241099 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039a
efbf549e5879a4db9daec092\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.255132 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.255174 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.255188 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.255209 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.255226 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.255549 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.273708 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.294657 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.314409 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.333474 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.351036 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.358773 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.358825 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.358843 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.358869 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.358895 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.373269 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.391895 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:05Z is after 2025-08-24T17:21:41Z" Nov 29 
04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.461570 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.461665 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.461683 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.461736 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.461753 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.564671 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.564776 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.564797 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.564858 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.564881 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.668117 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.668176 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.668200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.668233 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.668255 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.771603 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.771670 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.771688 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.771718 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.771737 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.875288 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.875393 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.875411 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.875435 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.875450 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.978094 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.978159 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.978176 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.978202 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:05 crc kubenswrapper[4631]: I1129 04:12:05.978219 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:05Z","lastTransitionTime":"2025-11-29T04:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.081498 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.081558 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.081575 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.081603 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.081640 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.184461 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.184510 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.184521 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.184538 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.184549 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.215572 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:12:06 crc kubenswrapper[4631]: E1129 04:12:06.215722 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.286723 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.286756 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.286780 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.286814 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.286823 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.389032 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.389075 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.389084 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.389095 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.389103 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.491455 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.491523 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.491541 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.491564 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.491582 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.594463 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.594522 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.594541 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.594566 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.594583 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.632925 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.633044 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.633071 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.633103 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.633126 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:06 crc kubenswrapper[4631]: E1129 04:12:06.654749 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:06Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.660886 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.660929 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.660945 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.660967 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.660983 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:06 crc kubenswrapper[4631]: E1129 04:12:06.681672 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:06Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.686327 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.686413 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.686431 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.686453 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.686469 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:06 crc kubenswrapper[4631]: E1129 04:12:06.702695 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:06Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.707792 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.707855 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.707878 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.707909 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.707930 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:06 crc kubenswrapper[4631]: E1129 04:12:06.728900 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:06Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.733642 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.733878 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.734096 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.734307 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.734528 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: E1129 04:12:06.755518 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:06Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:06 crc kubenswrapper[4631]: E1129 04:12:06.755673 4631 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.757493 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.757536 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.757551 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.757571 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.757586 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.861799 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.861835 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.861848 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.861864 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.861877 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.964538 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.964585 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.964599 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.964619 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:06 crc kubenswrapper[4631]: I1129 04:12:06.964635 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:06Z","lastTransitionTime":"2025-11-29T04:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.068105 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.068154 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.068170 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.068194 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.068210 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.170591 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.170659 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.170677 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.170704 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.170725 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.215695 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.215737 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:07 crc kubenswrapper[4631]: E1129 04:12:07.216127 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:07 crc kubenswrapper[4631]: E1129 04:12:07.216251 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.216852 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:07 crc kubenswrapper[4631]: E1129 04:12:07.217061 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.273706 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.273821 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.273850 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.273882 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.273905 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.376570 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.376621 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.376637 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.376659 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.376677 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.479608 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.479653 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.479670 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.479691 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.479708 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.582583 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.582655 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.582679 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.582725 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.582750 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.685805 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.685855 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.685872 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.685893 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.685910 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.789022 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.789067 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.789083 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.789105 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.789121 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.892546 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.892608 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.892629 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.892656 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.892675 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.995712 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.995796 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.995814 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.995836 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:07 crc kubenswrapper[4631]: I1129 04:12:07.995854 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:07Z","lastTransitionTime":"2025-11-29T04:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.098305 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.098418 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.098444 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.098473 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.098494 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:08Z","lastTransitionTime":"2025-11-29T04:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.201803 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.201838 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.201848 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.201864 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.201875 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:08Z","lastTransitionTime":"2025-11-29T04:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.216203 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:12:08 crc kubenswrapper[4631]: E1129 04:12:08.216428 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.217626 4631 scope.go:117] "RemoveContainer" containerID="26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.304571 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.304627 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.304643 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.304664 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.304681 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:08Z","lastTransitionTime":"2025-11-29T04:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.407894 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.408279 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.408297 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.408322 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.408363 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:08Z","lastTransitionTime":"2025-11-29T04:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.510726 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.510766 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.510782 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.510801 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.510817 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:08Z","lastTransitionTime":"2025-11-29T04:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.613886 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.613930 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.613947 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.613970 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.613988 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:08Z","lastTransitionTime":"2025-11-29T04:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.622021 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/1.log" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.628128 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d"} Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.629923 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.655891 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\
\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"r
ecursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.671696 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.693691 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.705915 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.717231 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.717295 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.717320 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.717379 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.717402 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:08Z","lastTransitionTime":"2025-11-29T04:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.731842 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.779345 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.800162 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.820009 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.820045 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.820057 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.820073 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.820083 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:08Z","lastTransitionTime":"2025-11-29T04:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.820298 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.832731 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.847474 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.859284 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.870741 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.881668 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\
"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.902196 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293
db4d6702e06d11a1f879503d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.915449 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.921917 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.921939 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.921946 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.921959 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.921967 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:08Z","lastTransitionTime":"2025-11-29T04:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.927593 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:08 crc kubenswrapper[4631]: I1129 04:12:08.935661 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:08Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc 
kubenswrapper[4631]: I1129 04:12:09.023949 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.023989 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.024000 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.024015 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.024026 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.127103 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.127162 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.127178 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.127195 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.127207 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.215538 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.215632 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.215662 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:09 crc kubenswrapper[4631]: E1129 04:12:09.215839 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:09 crc kubenswrapper[4631]: E1129 04:12:09.216069 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:09 crc kubenswrapper[4631]: E1129 04:12:09.216164 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.229659 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.229734 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.229751 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.229773 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.229788 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.333831 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.333894 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.333912 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.333936 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.333956 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.436749 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.436811 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.436830 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.436856 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.436876 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.547218 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.547283 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.547303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.547356 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.547375 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.635751 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/2.log"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.636849 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/1.log"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.641781 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d" exitCode=1
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.641844 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d"}
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.641905 4631 scope.go:117] "RemoveContainer" containerID="26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.643076 4631 scope.go:117] "RemoveContainer" containerID="526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d"
Nov 29 04:12:09 crc kubenswrapper[4631]: E1129 04:12:09.643375 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.650088 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.650149 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.650170 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.650196 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.650215 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.667123 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.688010 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.710312 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.744589 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293
db4d6702e06d11a1f879503d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26b08fb29c2d0dddf48493e9a8ec1decb8cb039aefbf549e5879a4db9daec092\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:11:45Z\\\",\\\"message\\\":\\\"roller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785813 5985 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:11:44.786909 5985 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:11:44.786928 5985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1129 04:11:44.786942 5985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1129 04:11:44.786948 5985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1129 04:11:44.786979 5985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1129 04:11:44.786975 5985 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:11:44.785822 5985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:11:44.787261 5985 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:11:44.787276 5985 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1129 04:11:44.787283 5985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1129 04:11:44.787290 5985 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1129 04:11:44.787423 5985 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:09Z\\\",\\\"message\\\":\\\"p_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 04:12:09.178193 6245 factory.go:656] Stopping watch factory\\\\nI1129 04:12:09.178214 6245 ovnkube.go:599] Stopped ovnkube\\\\nI1129 04:12:09.178225 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:12:09.178246 6245 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 04:12:09.177445 6245 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1129 04:12:09.178296 6245 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1129 04:12:09.178315 6245 kube.go:317] Updating pod openshift-multus/network-metrics-daemon-b6vgh\\\\nF1129 04:12:09.178318 6245 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin 
network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.754002 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.754157 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.754177 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.754203 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.754220 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.761750 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.782749 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.804060 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.823642 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.841686 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e
9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.857598 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.857648 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.857666 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.857690 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.857708 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.861437 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.878659 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.893148 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.915733 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.935626 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.953257 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.960703 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.960747 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.960765 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.960789 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.960806 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:09Z","lastTransitionTime":"2025-11-29T04:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:09 crc kubenswrapper[4631]: I1129 04:12:09.977323 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.000193 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8d
d8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:09Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.064083 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.064133 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.064149 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.064171 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.064188 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.166400 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.166463 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.166486 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.166526 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.166548 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.216487 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:10 crc kubenswrapper[4631]: E1129 04:12:10.216685 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.270250 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.270316 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.270371 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.270408 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.270429 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.373747 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.373804 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.373820 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.373845 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.373861 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.477067 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.477151 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.477177 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.477208 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.477231 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.580674 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.580738 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.580758 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.580782 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.580799 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.648693 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/2.log" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.654242 4631 scope.go:117] "RemoveContainer" containerID="526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d" Nov 29 04:12:10 crc kubenswrapper[4631]: E1129 04:12:10.654603 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.671757 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.683260 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.683311 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.683371 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.683460 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.683487 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.687769 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.707051 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.724184 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.749127 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.769562 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"p
odIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.787072 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.787123 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.787139 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.787161 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.787177 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.796864 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.820476 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.842560 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.862086 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.882521 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.890168 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.890239 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.890262 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.890293 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.890315 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.903029 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.934426 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:09Z\\\",\\\"message\\\":\\\"p_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 04:12:09.178193 6245 factory.go:656] Stopping watch factory\\\\nI1129 04:12:09.178214 6245 ovnkube.go:599] Stopped ovnkube\\\\nI1129 04:12:09.178225 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:12:09.178246 6245 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 04:12:09.177445 6245 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1129 04:12:09.178296 6245 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1129 04:12:09.178315 6245 kube.go:317] Updating pod openshift-multus/network-metrics-daemon-b6vgh\\\\nF1129 04:12:09.178318 6245 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.952063 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.975165 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.992695 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.992744 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.992753 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.992769 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.992778 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:10Z","lastTransitionTime":"2025-11-29T04:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:10 crc kubenswrapper[4631]: I1129 04:12:10.994926 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:10Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.010239 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:11Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.095606 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.095665 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.095682 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.095707 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.095726 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:11Z","lastTransitionTime":"2025-11-29T04:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.198427 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.198483 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.198499 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.198522 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.198539 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:11Z","lastTransitionTime":"2025-11-29T04:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.215508 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.215556 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.215672 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:11 crc kubenswrapper[4631]: E1129 04:12:11.215861 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:11 crc kubenswrapper[4631]: E1129 04:12:11.216018 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:11 crc kubenswrapper[4631]: E1129 04:12:11.216130 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.301983 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.302054 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.302079 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.302108 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.302132 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:11Z","lastTransitionTime":"2025-11-29T04:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.404962 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.405019 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.405036 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.405058 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.405074 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:11Z","lastTransitionTime":"2025-11-29T04:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.508834 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.508894 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.508912 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.508935 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.508952 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:11Z","lastTransitionTime":"2025-11-29T04:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.612030 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.612108 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.612131 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.612167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.612188 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:11Z","lastTransitionTime":"2025-11-29T04:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.714745 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.714844 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.714868 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.714900 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.714922 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:11Z","lastTransitionTime":"2025-11-29T04:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.817675 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.817724 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.817737 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.817754 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.817766 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:11Z","lastTransitionTime":"2025-11-29T04:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.920680 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.920757 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.920777 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.920804 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:11 crc kubenswrapper[4631]: I1129 04:12:11.920825 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:11Z","lastTransitionTime":"2025-11-29T04:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.023652 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.023698 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.023714 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.023735 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.023754 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.126517 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.126600 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.126617 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.126640 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.126659 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.216549 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:12 crc kubenswrapper[4631]: E1129 04:12:12.216761 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.229767 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.229821 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.229837 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.229859 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.229875 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.333223 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.333290 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.333315 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.333377 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.333400 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.436732 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.436798 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.436821 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.436844 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.436861 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.539538 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.539612 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.539636 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.539664 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.539684 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.642606 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.642676 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.642700 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.642727 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.642753 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.746170 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.746227 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.746243 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.746265 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.746284 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.849960 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.850033 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.850060 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.850090 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.850110 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.953385 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.953459 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.953482 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.953507 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:12 crc kubenswrapper[4631]: I1129 04:12:12.953529 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:12Z","lastTransitionTime":"2025-11-29T04:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.057321 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.057498 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.057530 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.057565 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.057603 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.161235 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.161275 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.161287 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.161303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.161318 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.216060 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:13 crc kubenswrapper[4631]: E1129 04:12:13.216285 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.216736 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.216819 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:13 crc kubenswrapper[4631]: E1129 04:12:13.216943 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:13 crc kubenswrapper[4631]: E1129 04:12:13.217250 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.244071 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:09Z\\\",\\\"message\\\":\\\"p_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 04:12:09.178193 6245 factory.go:656] Stopping watch factory\\\\nI1129 04:12:09.178214 6245 ovnkube.go:599] Stopped ovnkube\\\\nI1129 04:12:09.178225 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:12:09.178246 6245 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 04:12:09.177445 6245 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1129 04:12:09.178296 6245 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1129 04:12:09.178315 6245 kube.go:317] Updating pod openshift-multus/network-metrics-daemon-b6vgh\\\\nF1129 04:12:09.178318 6245 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped 
a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.260108 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.264283 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.264383 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.264409 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.264438 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.264460 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.289722 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.331978 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.358072 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.366815 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.366838 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.366848 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.366865 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.366876 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.369555 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.382604 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.394701 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.402902 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e
9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.412749 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.424361 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.434272 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.443258 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.457333 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.468611 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.468671 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.468688 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.468712 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.468729 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.470607 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd7
89a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.485538 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.499776 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:13Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.571144 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.571207 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.571228 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.571255 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.571271 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.673239 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.673298 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.673318 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.673369 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.673388 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.776061 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.776122 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.776142 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.776169 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.776188 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.879706 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.879777 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.879800 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.879846 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.879867 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.983309 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.983423 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.983445 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.983473 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:13 crc kubenswrapper[4631]: I1129 04:12:13.983491 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:13Z","lastTransitionTime":"2025-11-29T04:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.086385 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.086440 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.086463 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.086489 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.086507 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:14Z","lastTransitionTime":"2025-11-29T04:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.190149 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.190197 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.190214 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.190236 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.190252 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:14Z","lastTransitionTime":"2025-11-29T04:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.216432 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:14 crc kubenswrapper[4631]: E1129 04:12:14.216748 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.292510 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.292544 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.292554 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.292571 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.292583 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:14Z","lastTransitionTime":"2025-11-29T04:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.394834 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.394868 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.394879 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.394895 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.394906 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:14Z","lastTransitionTime":"2025-11-29T04:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.498718 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.498788 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.498814 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.498879 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.498906 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:14Z","lastTransitionTime":"2025-11-29T04:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.602093 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.602148 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.602167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.602189 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.602204 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:14Z","lastTransitionTime":"2025-11-29T04:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.704819 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.704885 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.704910 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.704944 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.704967 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:14Z","lastTransitionTime":"2025-11-29T04:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.807663 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.807739 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.807757 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.807783 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.807802 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:14Z","lastTransitionTime":"2025-11-29T04:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.910293 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.910379 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.910400 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.910427 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:14 crc kubenswrapper[4631]: I1129 04:12:14.910442 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:14Z","lastTransitionTime":"2025-11-29T04:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.013193 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.013250 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.013266 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.013288 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.013306 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.116418 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.116472 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.116484 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.116501 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.116517 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.215845 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.215923 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.215936 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:15 crc kubenswrapper[4631]: E1129 04:12:15.216027 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:15 crc kubenswrapper[4631]: E1129 04:12:15.216116 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:15 crc kubenswrapper[4631]: E1129 04:12:15.216296 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.218807 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.218856 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.218875 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.218900 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.218922 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.321761 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.321797 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.321848 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.321865 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.321877 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.424796 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.424831 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.424843 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.424857 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.424868 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.527927 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.528018 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.528032 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.528050 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.528069 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.631521 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.631577 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.631586 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.631599 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.631607 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.733834 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.733919 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.733935 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.733958 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.733975 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.837373 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.837448 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.837466 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.837495 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.837513 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.940539 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.940609 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.940626 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.940652 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:15 crc kubenswrapper[4631]: I1129 04:12:15.940673 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:15Z","lastTransitionTime":"2025-11-29T04:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.042952 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.042994 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.043009 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.043026 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.043036 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.146708 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.146764 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.146784 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.146809 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.146826 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.216423 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:16 crc kubenswrapper[4631]: E1129 04:12:16.216646 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.249081 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.249177 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.249195 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.249239 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.249257 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.352296 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.352388 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.352408 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.352436 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.352453 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.455621 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.455682 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.455700 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.455726 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.455744 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.558977 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.559062 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.559113 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.559139 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.559157 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.661760 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.661815 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.661833 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.661860 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.661879 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.765046 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.765093 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.765110 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.765131 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.765148 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.867687 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.867960 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.868074 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.868167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.868245 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.970920 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.971014 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.971063 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.971089 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:16 crc kubenswrapper[4631]: I1129 04:12:16.971106 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:16Z","lastTransitionTime":"2025-11-29T04:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.073600 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.073635 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.073645 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.073657 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.073699 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.132434 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.132498 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.132509 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.132532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.132545 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:17 crc kubenswrapper[4631]: E1129 04:12:17.143064 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:17Z is after 
2025-08-24T17:21:41Z" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.146167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.146215 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.146226 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.146246 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.146257 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:17 crc kubenswrapper[4631]: E1129 04:12:17.155749 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:17Z is after 
2025-08-24T17:21:41Z" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.158698 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.158734 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.158744 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.158778 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.158789 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:17 crc kubenswrapper[4631]: E1129 04:12:17.169124 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:17Z is after 
2025-08-24T17:21:41Z" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.171697 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.171718 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.171743 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.171756 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.171765 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:17 crc kubenswrapper[4631]: E1129 04:12:17.182469 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:17Z is after 
2025-08-24T17:21:41Z" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.186360 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.186420 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.186610 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.186633 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.186645 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:17 crc kubenswrapper[4631]: E1129 04:12:17.198016 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:17Z is after 
2025-08-24T17:21:41Z" Nov 29 04:12:17 crc kubenswrapper[4631]: E1129 04:12:17.198181 4631 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.200685 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.200751 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.200769 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.200792 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.200810 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.215558 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:17 crc kubenswrapper[4631]: E1129 04:12:17.215687 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.215880 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:17 crc kubenswrapper[4631]: E1129 04:12:17.215931 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.216110 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:17 crc kubenswrapper[4631]: E1129 04:12:17.216159 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.303822 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.303905 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.303926 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.303950 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.303968 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.406651 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.406731 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.406755 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.407223 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.407261 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.509627 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.509673 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.509689 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.509712 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.509728 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.612278 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.612324 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.612346 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.612359 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.612369 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.714783 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.714826 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.714837 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.714852 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.714862 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
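Aside (not from the log): the NetworkReady=false condition repeats because the kubelet's network-readiness probe finds nothing under /etc/kubernetes/cni/net.d/. A small Go sketch of the check it is effectively performing, under the assumption (matching libcni's usual behavior) that only *.conf, *.conflist and *.json files count as CNI configuration:

    package main

    import (
            "fmt"
            "os"
            "path/filepath"
    )

    func main() {
            // Directory named in the kubelet error message.
            dir := "/etc/kubernetes/cni/net.d"
            entries, err := os.ReadDir(dir)
            if err != nil {
                    fmt.Println("cannot read CNI conf dir:", err)
                    return
            }
            found := false
            for _, e := range entries {
                    switch filepath.Ext(e.Name()) {
                    case ".conf", ".conflist", ".json":
                            fmt.Println("found CNI config:", e.Name())
                            found = true
                    }
            }
            if !found {
                    // Mirrors the NetworkPluginNotReady condition seen in this log.
                    fmt.Println("no CNI configuration file in", dir)
            }
    }

On this node the directory stays empty until the network operator (ovn-kubernetes/multus here) writes its config, which is why the condition clears only once the network provider starts.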
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.817393 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.817451 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.817464 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.817484 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.817496 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.920079 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.920143 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.920163 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.920188 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:17 crc kubenswrapper[4631]: I1129 04:12:17.920206 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:17Z","lastTransitionTime":"2025-11-29T04:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.023000 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.023042 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.023051 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.023071 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.023080 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
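Aside (not from the log): the condition={...} blob that setters.go:603 prints on every cycle is plain JSON and can be extracted from the log for tooling. A self-contained sketch that decodes one such condition into a local struct (the field names mirror the log output; this is not the real k8s.io/api NodeCondition type):

    package main

    import (
            "encoding/json"
            "fmt"
    )

    // Local mirror of the condition object logged by setters.go.
    type nodeCondition struct {
            Type               string `json:"type"`
            Status             string `json:"status"`
            LastHeartbeatTime  string `json:"lastHeartbeatTime"`
            LastTransitionTime string `json:"lastTransitionTime"`
            Reason             string `json:"reason"`
            Message            string `json:"message"`
    }

    func main() {
            // One condition payload copied from the log above.
            raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
            var c nodeCondition
            if err := json.Unmarshal([]byte(raw), &c); err != nil {
                    panic(err)
            }
            fmt.Printf("%s=%s reason=%s at %s\n", c.Type, c.Status, c.Reason, c.LastHeartbeatTime)
    }

The timestamps in these blocks advance by roughly 100ms per cycle, which is the kubelet's status loop spinning while the node stays NotReady.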
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.126200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.126250 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.126260 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.126277 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.126288 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.215906 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:12:18 crc kubenswrapper[4631]: E1129 04:12:18.216052 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.228803 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.228836 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.228845 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.228862 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.228871 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.330851 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.330897 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.330909 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.330927 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.330942 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.433776 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.433830 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.433847 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.433868 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.433886 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.536084 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.536120 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.536130 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.536144 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.536155 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.638827 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.638887 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.638897 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.638914 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.638925 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.694028 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:12:18 crc kubenswrapper[4631]: E1129 04:12:18.694206 4631 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 29 04:12:18 crc kubenswrapper[4631]: E1129 04:12:18.694275 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs podName:c6c5bb91-f03c-4672-bc61-69a68b8c89d6 nodeName:}" failed. No retries permitted until 2025-11-29 04:12:50.694254569 +0000 UTC m=+97.758758203 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs") pod "network-metrics-daemon-b6vgh" (UID: "c6c5bb91-f03c-4672-bc61-69a68b8c89d6") : object "openshift-multus"/"metrics-daemon-secret" not registered
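Aside (not from the log): the "not registered" wording typically means the kubelet's secret manager has not yet registered a watch for that object on behalf of the pod, not that the secret is absent from the API; the mount is parked with exponential backoff (durationBeforeRetry 32s above) until registration catches up. A hedged client-go sketch to confirm the secret itself exists, assuming client-go is on the module path and a reachable kubeconfig (the path below is a placeholder):

    package main

    import (
            "context"
            "fmt"

            metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
            "k8s.io/client-go/kubernetes"
            "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
            // Placeholder kubeconfig path; substitute the cluster's admin kubeconfig.
            cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
            if err != nil {
                    panic(err)
            }
            cs, err := kubernetes.NewForConfig(cfg)
            if err != nil {
                    panic(err)
            }
            // Namespace and name are taken from the secret.go error above.
            sec, err := cs.CoreV1().Secrets("openshift-multus").Get(context.TODO(), "metrics-daemon-secret", metav1.GetOptions{})
            if err != nil {
                    panic(err)
            }
            fmt.Printf("secret %s/%s exists with %d keys\n", sec.Namespace, sec.Name, len(sec.Data))
    }

If the secret exists, the mount failure should clear on a later retry once the pod's objects are registered.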
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.741659 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.741715 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.741731 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.741755 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.741771 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.844598 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.844624 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.844632 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.844644 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.844652 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.947285 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.947380 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.947397 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.947418 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:18 crc kubenswrapper[4631]: I1129 04:12:18.947433 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:18Z","lastTransitionTime":"2025-11-29T04:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.050011 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.050050 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.050064 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.050079 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.050089 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.152059 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.152115 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.152138 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.152162 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.152179 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.216375 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.216428 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:19 crc kubenswrapper[4631]: E1129 04:12:19.216550 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.216398 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:19 crc kubenswrapper[4631]: E1129 04:12:19.216786 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:19 crc kubenswrapper[4631]: E1129 04:12:19.216848 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.229693 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.254582 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.255016 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.255040 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.255074 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.255090 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.357407 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.357724 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.357738 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.357760 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.357777 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.460493 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.460533 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.460545 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.460562 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.460574 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.563208 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.563242 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.563250 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.563264 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.563273 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.665919 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.665960 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.665975 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.665991 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.666003 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.700375 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/0.log" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.700437 4631 generic.go:334] "Generic (PLEG): container finished" podID="7f871e13-bbe2-4104-8f40-70e695653fef" containerID="323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e" exitCode=1 Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.700536 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-pbk6b" event={"ID":"7f871e13-bbe2-4104-8f40-70e695653fef","Type":"ContainerDied","Data":"323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.700946 4631 scope.go:117] "RemoveContainer" containerID="323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.722753 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.733271 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"2025-11-29T04:11:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7\\\\n2025-11-29T04:11:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7 to /host/opt/cni/bin/\\\\n2025-11-29T04:11:34Z [verbose] multus-daemon started\\\\n2025-11-29T04:11:34Z [verbose] Readiness Indicator file check\\\\n2025-11-29T04:12:19Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.744348 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.753593 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.766788 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.767764 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.767786 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.767795 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.767809 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.767818 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.782059 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.801029 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:09Z\\\",\\\"message\\\":\\\"p_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 04:12:09.178193 6245 factory.go:656] Stopping watch factory\\\\nI1129 04:12:09.178214 6245 ovnkube.go:599] Stopped ovnkube\\\\nI1129 04:12:09.178225 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:12:09.178246 6245 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 04:12:09.177445 6245 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1129 04:12:09.178296 6245 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1129 04:12:09.178315 6245 kube.go:317] Updating pod openshift-multus/network-metrics-daemon-b6vgh\\\\nF1129 04:12:09.178318 6245 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.809049 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.818284 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.830660 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.843302 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.853877 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.863215 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.869669 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.869704 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.869717 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.869734 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.869747 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.875613 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.884936 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 
04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.896542 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.905524 4631 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 
04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.914405 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:19Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.971989 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.972016 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.972023 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.972036 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:19 crc kubenswrapper[4631]: I1129 04:12:19.972044 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:19Z","lastTransitionTime":"2025-11-29T04:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.074288 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.074324 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.074361 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.074375 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.074383 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.176708 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.176753 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.176764 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.176780 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.176790 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.215709 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:20 crc kubenswrapper[4631]: E1129 04:12:20.215892 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.279823 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.279896 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.279914 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.279939 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.279956 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.382452 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.382552 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.382570 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.382596 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.382614 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.485037 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.485084 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.485098 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.485114 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.485124 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.588451 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.588661 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.588736 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.588806 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.588875 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.690491 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.690535 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.690545 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.690560 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.690568 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.704674 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/0.log" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.704727 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-pbk6b" event={"ID":"7f871e13-bbe2-4104-8f40-70e695653fef","Type":"ContainerStarted","Data":"73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.721054 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"
imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.734598 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 
04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.743837 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.759227 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.773277 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.781789 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.793673 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.793705 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.793715 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.793731 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.793739 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.796736 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.809355 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.821238 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.834569 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.852566 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"2025-11-29T04:11:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7\\\\n2025-11-29T04:11:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7 to /host/opt/cni/bin/\\\\n2025-11-29T04:11:34Z [verbose] multus-daemon started\\\\n2025-11-29T04:11:34Z [verbose] Readiness Indicator file check\\\\n2025-11-29T04:12:19Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.863074 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.874880 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.884424 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.895407 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.895435 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.895444 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.895458 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.895468 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.898541 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.918910 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:09Z\\\",\\\"message\\\":\\\"p_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 04:12:09.178193 6245 factory.go:656] Stopping watch factory\\\\nI1129 04:12:09.178214 6245 ovnkube.go:599] Stopped ovnkube\\\\nI1129 04:12:09.178225 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:12:09.178246 6245 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 04:12:09.177445 6245 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1129 04:12:09.178296 6245 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1129 04:12:09.178315 6245 kube.go:317] Updating pod openshift-multus/network-metrics-daemon-b6vgh\\\\nF1129 04:12:09.178318 6245 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.930316 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.941944 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:20Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.997664 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.997752 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.997764 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.997798 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:20 crc kubenswrapper[4631]: I1129 04:12:20.997809 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:20Z","lastTransitionTime":"2025-11-29T04:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.100189 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.100223 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.100233 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.100253 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.100265 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:21Z","lastTransitionTime":"2025-11-29T04:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.202547 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.202597 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.202617 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.202640 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.202659 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:21Z","lastTransitionTime":"2025-11-29T04:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.218160 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:21 crc kubenswrapper[4631]: E1129 04:12:21.218345 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.218597 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.218630 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:21 crc kubenswrapper[4631]: E1129 04:12:21.218750 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:21 crc kubenswrapper[4631]: E1129 04:12:21.218890 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.304597 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.304671 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.304695 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.304728 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.304752 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:21Z","lastTransitionTime":"2025-11-29T04:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.407237 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.407288 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.407303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.407322 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.407349 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:21Z","lastTransitionTime":"2025-11-29T04:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.510092 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.510135 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.510146 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.510162 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.510174 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:21Z","lastTransitionTime":"2025-11-29T04:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.612554 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.612589 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.612601 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.612620 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.612631 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:21Z","lastTransitionTime":"2025-11-29T04:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.714914 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.715007 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.715033 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.715062 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.715079 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:21Z","lastTransitionTime":"2025-11-29T04:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.817531 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.817575 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.817588 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.817606 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.817619 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:21Z","lastTransitionTime":"2025-11-29T04:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.919649 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.919694 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.919739 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.919751 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:21 crc kubenswrapper[4631]: I1129 04:12:21.919760 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:21Z","lastTransitionTime":"2025-11-29T04:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.022973 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.023031 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.023047 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.023070 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.023086 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.125983 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.126012 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.126021 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.126033 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.126043 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.216139 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:22 crc kubenswrapper[4631]: E1129 04:12:22.216246 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.227856 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.227894 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.227902 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.227916 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.227926 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.329890 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.329930 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.329937 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.329951 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.329960 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.431962 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.432009 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.432020 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.432038 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.432050 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.533978 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.534019 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.534033 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.534048 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.534060 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.636130 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.636168 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.636177 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.636191 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.636200 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.739148 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.739200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.739217 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.739242 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.739258 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.841613 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.841670 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.841688 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.841711 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.841731 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.949257 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.949303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.949320 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.949372 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:22 crc kubenswrapper[4631]: I1129 04:12:22.949395 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:22Z","lastTransitionTime":"2025-11-29T04:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.052621 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.052679 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.052696 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.052721 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.052742 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.155304 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.155385 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.155404 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.155491 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.155514 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.217472 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:23 crc kubenswrapper[4631]: E1129 04:12:23.217583 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.218064 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:23 crc kubenswrapper[4631]: E1129 04:12:23.218136 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.218257 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:23 crc kubenswrapper[4631]: E1129 04:12:23.218319 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.219014 4631 scope.go:117] "RemoveContainer" containerID="526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d" Nov 29 04:12:23 crc kubenswrapper[4631]: E1129 04:12:23.219140 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.236701 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.250570 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.258947 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.259005 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.259024 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.259051 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.259071 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.272042 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:09Z\\\",\\\"message\\\":\\\"p_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 04:12:09.178193 6245 factory.go:656] Stopping watch factory\\\\nI1129 04:12:09.178214 6245 ovnkube.go:599] Stopped ovnkube\\\\nI1129 04:12:09.178225 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:12:09.178246 6245 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 04:12:09.177445 6245 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1129 04:12:09.178296 6245 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1129 04:12:09.178315 6245 kube.go:317] Updating pod openshift-multus/network-metrics-daemon-b6vgh\\\\nF1129 04:12:09.178318 6245 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.282806 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.298729 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.316872 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.330676 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.348126 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 
04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.358705 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.361015 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.361050 4631 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.361060 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.361074 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.361083 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.373579 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.387260 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.400920 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.412731 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.429325 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.442028 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.451610 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"2025-11-29T04:11:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7\\\\n2025-11-29T04:11:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7 to /host/opt/cni/bin/\\\\n2025-11-29T04:11:34Z [verbose] multus-daemon started\\\\n2025-11-29T04:11:34Z [verbose] Readiness Indicator file check\\\\n2025-11-29T04:12:19Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.462453 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.464052 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.464109 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.464128 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.464152 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.464169 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.476565 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:23Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.566329 4631 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.566368 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.566377 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.566391 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.566400 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.669563 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.669610 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.669658 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.669680 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.669691 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.771165 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.771206 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.771215 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.771232 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.771244 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.873476 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.873531 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.873548 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.873572 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.873592 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.976774 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.976812 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.976820 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.976834 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:23 crc kubenswrapper[4631]: I1129 04:12:23.976843 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:23Z","lastTransitionTime":"2025-11-29T04:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.079418 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.079443 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.079451 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.079463 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.079471 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:24Z","lastTransitionTime":"2025-11-29T04:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.181655 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.181680 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.181688 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.181701 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.181726 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:24Z","lastTransitionTime":"2025-11-29T04:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.216461 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:24 crc kubenswrapper[4631]: E1129 04:12:24.216711 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.283686 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.283786 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.283842 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.283869 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.283923 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:24Z","lastTransitionTime":"2025-11-29T04:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.388606 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.388645 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.388653 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.388669 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.388679 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:24Z","lastTransitionTime":"2025-11-29T04:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.492088 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.492128 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.492137 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.492152 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.492162 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:24Z","lastTransitionTime":"2025-11-29T04:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.594556 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.594604 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.594620 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.594643 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.594660 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:24Z","lastTransitionTime":"2025-11-29T04:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.696641 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.696675 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.696686 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.696701 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.696713 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:24Z","lastTransitionTime":"2025-11-29T04:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.798865 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.798896 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.798907 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.798923 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.798934 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:24Z","lastTransitionTime":"2025-11-29T04:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.901689 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.901735 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.901746 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.901762 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:24 crc kubenswrapper[4631]: I1129 04:12:24.901775 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:24Z","lastTransitionTime":"2025-11-29T04:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.004411 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.004480 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.004506 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.004536 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.004558 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.107450 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.107594 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.107617 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.107643 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.107734 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.211835 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.211910 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.211933 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.211963 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.211984 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.216153 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.216232 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:25 crc kubenswrapper[4631]: E1129 04:12:25.216305 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.216154 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:25 crc kubenswrapper[4631]: E1129 04:12:25.216465 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:25 crc kubenswrapper[4631]: E1129 04:12:25.216537 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.314504 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.314542 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.314551 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.314564 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.314574 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.416494 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.416536 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.416571 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.416591 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.416603 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.520059 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.520144 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.520167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.520203 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.520232 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.623305 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.623427 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.623478 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.623503 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.623519 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.726474 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.726946 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.727090 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.727301 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.727479 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.831191 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.831253 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.831271 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.831296 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.831315 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.934317 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.934808 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.935003 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.935154 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:25 crc kubenswrapper[4631]: I1129 04:12:25.935319 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:25Z","lastTransitionTime":"2025-11-29T04:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.040223 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.040296 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.040314 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.040367 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.040387 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.143319 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.143405 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.143422 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.143445 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.143461 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.216041 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:26 crc kubenswrapper[4631]: E1129 04:12:26.216278 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.245704 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.245767 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.245784 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.245813 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.245841 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.349257 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.349293 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.349306 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.349324 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.349352 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.451939 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.451985 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.452002 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.452023 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.452041 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.555632 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.555691 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.555711 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.555737 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.555754 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.664221 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.664282 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.664306 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.664359 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.664381 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.767612 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.767667 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.767685 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.767709 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.767729 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.871923 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.871967 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.871978 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.871997 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.872010 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.975511 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.975558 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.975577 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.975600 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:26 crc kubenswrapper[4631]: I1129 04:12:26.975617 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:26Z","lastTransitionTime":"2025-11-29T04:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.216644 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.216698 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:27 crc kubenswrapper[4631]: E1129 04:12:27.216854 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.217200 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:27 crc kubenswrapper[4631]: E1129 04:12:27.217374 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:27 crc kubenswrapper[4631]: E1129 04:12:27.217672 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.326010 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.326059 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.326083 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.326117 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.326139 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: E1129 04:12:27.347833 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:27Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.353530 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.353581 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.353596 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.353620 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.353636 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: E1129 04:12:27.374524 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:27Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.380014 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.380048 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.380059 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.380077 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.380091 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: E1129 04:12:27.395187 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:27Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.399391 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.399441 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.399459 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.399481 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.399498 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: E1129 04:12:27.413231 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:27Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.417012 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.417039 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.417047 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.417059 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.417067 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: E1129 04:12:27.430138 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:27Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:27 crc kubenswrapper[4631]: E1129 04:12:27.430282 4631 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.432024 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.432068 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.432078 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.432090 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.432098 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.534874 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.534924 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.534941 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.534963 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.534979 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.637819 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.637874 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.637890 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.637913 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.637930 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.740906 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.740998 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.741020 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.741051 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.741070 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.844273 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.844410 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.844434 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.844464 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.844486 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.947642 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.947705 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.947727 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.947758 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:27 crc kubenswrapper[4631]: I1129 04:12:27.947785 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:27Z","lastTransitionTime":"2025-11-29T04:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.050457 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.050500 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.050518 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.050566 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.050582 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.152490 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.152637 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.152655 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.152676 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.152690 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.215740 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:28 crc kubenswrapper[4631]: E1129 04:12:28.215911 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.255582 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.255636 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.255651 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.255674 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.255690 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.358824 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.358896 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.358917 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.358945 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.358967 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.465300 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.465397 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.465419 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.465447 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.465467 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.567954 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.568009 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.568025 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.568048 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.568065 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.670854 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.670948 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.670967 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.670992 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.671009 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.773557 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.773608 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.773624 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.773645 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.773688 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.876464 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.876532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.876550 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.876573 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.876591 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.979069 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.979110 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.979118 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.979133 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:28 crc kubenswrapper[4631]: I1129 04:12:28.979142 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:28Z","lastTransitionTime":"2025-11-29T04:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.082370 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.082415 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.082428 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.082446 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.082458 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:29Z","lastTransitionTime":"2025-11-29T04:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.186075 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.186150 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.186173 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.186206 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.186232 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:29Z","lastTransitionTime":"2025-11-29T04:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.215745 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.215827 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.215828 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:29 crc kubenswrapper[4631]: E1129 04:12:29.216090 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:29 crc kubenswrapper[4631]: E1129 04:12:29.216288 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:29 crc kubenswrapper[4631]: E1129 04:12:29.216469 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.290370 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.290444 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.290482 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.290516 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.290535 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:29Z","lastTransitionTime":"2025-11-29T04:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.393015 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.393110 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.393129 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.393153 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.393172 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:29Z","lastTransitionTime":"2025-11-29T04:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.495642 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.495700 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.495717 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.495738 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.495751 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:29Z","lastTransitionTime":"2025-11-29T04:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.597984 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.598048 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.598076 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.598108 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.598134 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:29Z","lastTransitionTime":"2025-11-29T04:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.701569 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.701619 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.701630 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.701648 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.701663 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:29Z","lastTransitionTime":"2025-11-29T04:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.804401 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.804460 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.804478 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.804503 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.804520 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:29Z","lastTransitionTime":"2025-11-29T04:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.908041 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.908114 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.908131 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.908159 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:29 crc kubenswrapper[4631]: I1129 04:12:29.908180 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:29Z","lastTransitionTime":"2025-11-29T04:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.010983 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.011048 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.011064 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.011088 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.011106 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.114532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.114609 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.114634 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.114664 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.114686 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.215475 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:30 crc kubenswrapper[4631]: E1129 04:12:30.215668 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.217457 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.217527 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.217545 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.217569 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.217587 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.320544 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.320599 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.320615 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.320640 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.320657 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.423609 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.423676 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.423693 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.423722 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.423740 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.526880 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.526954 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.526973 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.527000 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.527019 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.630161 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.630211 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.630228 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.630255 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.630273 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.733523 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.733606 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.733629 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.733660 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.733681 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.836640 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.836692 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.836709 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.836735 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.836754 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.940172 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.940225 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.940243 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.940269 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:30 crc kubenswrapper[4631]: I1129 04:12:30.940286 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:30Z","lastTransitionTime":"2025-11-29T04:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.042810 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.042878 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.042918 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.042946 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.042962 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.146323 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.146414 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.146436 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.146464 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.146482 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.216036 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.216132 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.216046 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:31 crc kubenswrapper[4631]: E1129 04:12:31.216256 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:31 crc kubenswrapper[4631]: E1129 04:12:31.216508 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:31 crc kubenswrapper[4631]: E1129 04:12:31.216712 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.249327 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.249547 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.249719 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.249866 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.250003 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.352359 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.352428 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.352445 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.352468 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.352489 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.455140 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.455181 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.455198 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.455221 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.455238 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.558463 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.558498 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.558509 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.558525 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.558536 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.662163 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.662230 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.662281 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.662308 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.662324 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.765287 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.765381 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.765407 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.765436 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.765457 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.868612 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.868670 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.868686 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.868709 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.868724 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.975852 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.975913 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.975936 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.975965 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:31 crc kubenswrapper[4631]: I1129 04:12:31.975983 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:31Z","lastTransitionTime":"2025-11-29T04:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.079125 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.079190 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.079213 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.079245 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.079262 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:32Z","lastTransitionTime":"2025-11-29T04:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.182618 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.182697 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.182714 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.182768 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.182785 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:32Z","lastTransitionTime":"2025-11-29T04:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.215797 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:32 crc kubenswrapper[4631]: E1129 04:12:32.215974 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.285790 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.285861 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.285884 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.285916 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.285937 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:32Z","lastTransitionTime":"2025-11-29T04:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.389761 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.389830 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.389847 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.389873 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.389891 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:32Z","lastTransitionTime":"2025-11-29T04:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.493939 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.494003 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.494026 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.494055 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.494079 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:32Z","lastTransitionTime":"2025-11-29T04:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.597061 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.597117 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.597134 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.597159 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.597177 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:32Z","lastTransitionTime":"2025-11-29T04:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.700789 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.700871 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.700891 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.701469 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.701508 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:32Z","lastTransitionTime":"2025-11-29T04:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.805673 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.805748 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.805771 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.805804 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.805829 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:32Z","lastTransitionTime":"2025-11-29T04:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.909168 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.909224 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.909241 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.909264 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:32 crc kubenswrapper[4631]: I1129 04:12:32.909282 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:32Z","lastTransitionTime":"2025-11-29T04:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.011897 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.012402 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.012646 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.012860 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.013070 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.116585 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.116641 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.116657 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.116680 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.116697 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.215503 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:33 crc kubenswrapper[4631]: E1129 04:12:33.215661 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.215851 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:33 crc kubenswrapper[4631]: E1129 04:12:33.216027 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.216109 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:33 crc kubenswrapper[4631]: E1129 04:12:33.216200 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.229799 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.229908 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.229927 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.229962 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.230001 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.237398 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.259712 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.278017 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.298850 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"2025-11-29T04:11:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7\\\\n2025-11-29T04:11:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7 to /host/opt/cni/bin/\\\\n2025-11-29T04:11:34Z [verbose] multus-daemon started\\\\n2025-11-29T04:11:34Z [verbose] Readiness Indicator file check\\\\n2025-11-29T04:12:19Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.319158 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.332970 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.333289 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.333447 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.333574 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.333717 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.337531 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.354104 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.370257 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.392043 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293
db4d6702e06d11a1f879503d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:09Z\\\",\\\"message\\\":\\\"p_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 04:12:09.178193 6245 factory.go:656] Stopping watch factory\\\\nI1129 04:12:09.178214 6245 ovnkube.go:599] Stopped ovnkube\\\\nI1129 04:12:09.178225 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:12:09.178246 6245 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 04:12:09.177445 6245 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1129 04:12:09.178296 6245 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1129 04:12:09.178315 6245 kube.go:317] Updating pod openshift-multus/network-metrics-daemon-b6vgh\\\\nF1129 04:12:09.178318 6245 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.402496 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.415805 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.434022 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.437996 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.438040 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.438050 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc 
kubenswrapper[4631]: I1129 04:12:33.438070 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.438083 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.450164 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.463054 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 
04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.477065 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.487598 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.496747 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.510371 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:33Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.539796 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.539832 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc 
kubenswrapper[4631]: I1129 04:12:33.539844 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.539858 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.539868 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.642758 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.642784 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.642791 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.642804 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.642812 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.745381 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.745448 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.745473 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.745496 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.745512 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.849018 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.849127 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.849147 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.849206 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.849224 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.951372 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.951403 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.951412 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.951425 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:33 crc kubenswrapper[4631]: I1129 04:12:33.951433 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:33Z","lastTransitionTime":"2025-11-29T04:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.054254 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.054388 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.054407 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.054441 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.054458 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.158255 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.158308 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.158392 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.158436 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.158459 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.216328 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:34 crc kubenswrapper[4631]: E1129 04:12:34.216562 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.217650 4631 scope.go:117] "RemoveContainer" containerID="526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.261177 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.261213 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.261223 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.261240 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.261251 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.364676 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.364743 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.364765 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.364791 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.364809 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.466764 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.466798 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.466809 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.466824 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.466835 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.568925 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.568981 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.568993 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.569009 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.569021 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.671413 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.671446 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.671454 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.671466 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.671475 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.751920 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/2.log" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.753871 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.754291 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.765119 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.774145 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.774187 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.774199 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.774217 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.774229 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.784023 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.795143 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 
04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.810183 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.823508 4631 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 
04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.840744 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.858597 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.873915 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"2025-11-29T04:11:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7\\\\n2025-11-29T04:11:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7 to /host/opt/cni/bin/\\\\n2025-11-29T04:11:34Z [verbose] multus-daemon started\\\\n2025-11-29T04:11:34Z [verbose] Readiness Indicator file check\\\\n2025-11-29T04:12:19Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.876981 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.877026 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.877038 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.877056 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.877067 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.888486 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.899970 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.910397 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.927241 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.947553 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:09Z\\\",\\\"message\\\":\\\"p_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 04:12:09.178193 6245 factory.go:656] Stopping watch factory\\\\nI1129 04:12:09.178214 6245 ovnkube.go:599] Stopped ovnkube\\\\nI1129 04:12:09.178225 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:12:09.178246 6245 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 04:12:09.177445 6245 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1129 04:12:09.178296 6245 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1129 04:12:09.178315 6245 kube.go:317] Updating pod openshift-multus/network-metrics-daemon-b6vgh\\\\nF1129 04:12:09.178318 6245 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped 
a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.958854 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.971769 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.979146 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.979190 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.979202 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.979215 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.979226 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:34Z","lastTransitionTime":"2025-11-29T04:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.982581 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:34 crc kubenswrapper[4631]: I1129 04:12:34.993722 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:34Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.004755 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.081174 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.081219 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.081229 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.081244 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.081253 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:35Z","lastTransitionTime":"2025-11-29T04:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.183989 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.184048 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.184064 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.184088 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.184105 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:35Z","lastTransitionTime":"2025-11-29T04:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.216671 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:35 crc kubenswrapper[4631]: E1129 04:12:35.216880 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.216679 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.217000 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:35 crc kubenswrapper[4631]: E1129 04:12:35.217118 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:35 crc kubenswrapper[4631]: E1129 04:12:35.217225 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.290512 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.290576 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.290597 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.290626 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.290647 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:35Z","lastTransitionTime":"2025-11-29T04:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.393053 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.393103 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.393120 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.393143 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.393162 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:35Z","lastTransitionTime":"2025-11-29T04:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.496922 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.496997 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.497015 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.497040 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.497058 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:35Z","lastTransitionTime":"2025-11-29T04:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.600658 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.600713 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.600731 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.600754 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.600772 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:35Z","lastTransitionTime":"2025-11-29T04:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.704113 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.704163 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.704214 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.704240 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.704259 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:35Z","lastTransitionTime":"2025-11-29T04:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.760907 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/3.log"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.762404 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/2.log"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.767008 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" exitCode=1
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.767069 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"}
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.767128 4631 scope.go:117] "RemoveContainer" containerID="526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.768643 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"
Nov 29 04:12:35 crc kubenswrapper[4631]: E1129 04:12:35.769114 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a"
Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.792108 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.808055 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.808377 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.808390 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.808407 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.808441 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:35Z","lastTransitionTime":"2025-11-29T04:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.810789 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.826755 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc 
kubenswrapper[4631]: I1129 04:12:35.848303 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.862801 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.885743 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.904386 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.911661 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.911843 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.911966 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.912135 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.912271 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:35Z","lastTransitionTime":"2025-11-29T04:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.925496 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.942183 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.963302 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"2025-11-29T04:11:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7\\\\n2025-11-29T04:11:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7 to /host/opt/cni/bin/\\\\n2025-11-29T04:11:34Z [verbose] multus-daemon started\\\\n2025-11-29T04:11:34Z [verbose] Readiness Indicator file check\\\\n2025-11-29T04:12:19Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:35 crc kubenswrapper[4631]: I1129 04:12:35.980972 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:35Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.002154 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 
2025-08-24T17:21:41Z"
Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.015200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.015257 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.015273 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.015298 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.015315 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.022299 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.041850 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.074028 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab4098eee92d887562378f9f6c20b0ca9aea761
207e206b41579236a676521d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://526622895ac7aed5bfad7133374505f0236b7293db4d6702e06d11a1f879503d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:09Z\\\",\\\"message\\\":\\\"p_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1129 04:12:09.178193 6245 factory.go:656] Stopping watch factory\\\\nI1129 04:12:09.178214 6245 ovnkube.go:599] Stopped ovnkube\\\\nI1129 04:12:09.178225 6245 handler.go:208] Removed *v1.Node event handler 2\\\\nI1129 04:12:09.178246 6245 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1129 04:12:09.177445 6245 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1129 04:12:09.178296 6245 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1129 04:12:09.178315 6245 kube.go:317] Updating pod openshift-multus/network-metrics-daemon-b6vgh\\\\nF1129 04:12:09.178318 6245 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:35Z\\\",\\\"message\\\":\\\"ent/informers/externalversions/factory.go:141\\\\nI1129 04:12:35.048100 6579 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048155 6579 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048379 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048942 6579 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048102 6579 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:12:35.049190 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:12:35.049225 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:12:35.049276 6579 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1129 04:12:35.049319 6579 factory.go:656] Stopping watch factory\\\\nI1129 
04:12:35.049381 6579 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:12:35.049486 6579 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:12:35.049563 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.092690 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.112876 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.117675 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.117866 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.118030 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.118223 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.118413 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.136618 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.215457 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:36 crc kubenswrapper[4631]: E1129 04:12:36.215970 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.221303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.221378 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.221396 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.221418 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.221434 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.234542 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.324133 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.324188 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.324206 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.324231 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.324252 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.426686 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.426741 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.426762 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.426786 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.426803 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.529551 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.529597 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.529615 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.529636 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.529653 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.632846 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.632896 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.632912 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.632934 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.632951 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.735967 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.736033 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.736050 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.736075 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.736093 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.773049 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/3.log" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.779047 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:12:36 crc kubenswrapper[4631]: E1129 04:12:36.779439 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.809906 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab4098eee92d887562378f9f6c20b0ca9aea761
207e206b41579236a676521d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:35Z\\\",\\\"message\\\":\\\"ent/informers/externalversions/factory.go:141\\\\nI1129 04:12:35.048100 6579 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048155 6579 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048379 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048942 6579 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048102 6579 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:12:35.049190 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:12:35.049225 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:12:35.049276 6579 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1129 04:12:35.049319 6579 factory.go:656] Stopping watch factory\\\\nI1129 04:12:35.049381 6579 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:12:35.049486 6579 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:12:35.049563 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.824295 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.838779 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.838843 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.838866 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.838894 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.838913 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.844533 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.865966 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.897967 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.931655 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.941036 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.941281 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.941421 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.941518 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.941597 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:36Z","lastTransitionTime":"2025-11-29T04:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.959645 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.983684 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"086f29b7-9b42-409a-8591-645d3616320b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6100415a919b68a8cdc6dfbae9d3c391ac6db8e3908f39f789d0d1694a3ddc8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52f43cf26605dac1b088b7580b5447cfab48e84fd32e325e4ef6415215e09661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e827ca3b15395b1539d10999b148ca19dcc72e36e2ce2539e6c66f286ea8148f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62eada265bec693d8dc6f513b807347f2870d5b
ecfb53cdfc0a81c113fcf151b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecbe6fce889ea30339f989f5b40fab5157d4c50a77df9ddb8adcb56223755f73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee7f00ee91110b84305d8aca62531f4ff151324aa48d2d5018b4251db56af872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7f00ee91110b84305d8aca62531f4ff151324aa48d2d5018b4251db56af872\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a688ab04e6e05182db2c2f3bac369094bf7ecf634c117b01efc61fb757f5a938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a688ab04e6e05182db2c2f3bac369094bf7ecf634c117b01efc61fb757f5a938\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9022eec0ea1f259acc9309571973dbc1ea222f0ebf9cda5229aaf33e7ed7b442\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9022eec0ea1f259acc9309571973dbc1ea222f0ebf9cda5229aaf33e7ed7b442\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:36 crc kubenswrapper[4631]: I1129 04:12:36.995537 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:36Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.002840 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.002975 
4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:41.002950801 +0000 UTC m=+148.067454305 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.003158 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.003292 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.003229 4631 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.003560 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:13:41.003541835 +0000 UTC m=+148.068045359 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.003393 4631 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.003759 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-29 04:13:41.00374777 +0000 UTC m=+148.068251294 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.008130 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.023204 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.041541 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/sec
rets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"ex
itCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.043410 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.043452 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.043468 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.043489 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.043504 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.051877 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.062238 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.075867 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"2025-11-29T04:11:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7\\\\n2025-11-29T04:11:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7 to /host/opt/cni/bin/\\\\n2025-11-29T04:11:34Z [verbose] multus-daemon started\\\\n2025-11-29T04:11:34Z [verbose] Readiness Indicator file check\\\\n2025-11-29T04:12:19Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.087601 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.098905 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.104500 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.104643 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.104671 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.104683 4631 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.104642 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.104722 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-29 04:13:41.10471109 +0000 UTC m=+148.169214594 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.104938 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.105015 4631 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.105076 4631 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.105167 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-29 04:13:41.105158651 +0000 UTC m=+148.169662155 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.113745 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.123507 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.145119 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.145179 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.145202 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.145232 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.145253 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.216187 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.216211 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.216362 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.216430 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.216903 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.216753 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.248056 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.248146 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.248169 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.248200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.248224 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.350975 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.351017 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.351031 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.351045 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.351055 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.454255 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.454313 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.454362 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.454393 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.454415 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.557297 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.557435 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.557462 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.557495 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.557520 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.559649 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.559707 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.559725 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.559748 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.559766 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.581531 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.593781 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.593835 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.593852 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.593872 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.593888 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.614163 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.619551 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.619605 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.619623 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.619647 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.619662 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.641406 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.646099 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.646144 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.646160 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.646182 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.646200 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.666869 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.671496 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.671560 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.671578 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.671602 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.671619 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.691161 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:37Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:37 crc kubenswrapper[4631]: E1129 04:12:37.691411 4631 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.693276 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.693354 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.693371 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.693395 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.693412 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.796540 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.796611 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.796635 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.796664 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.796685 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.899308 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.899387 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.899405 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.899428 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:37 crc kubenswrapper[4631]: I1129 04:12:37.899445 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:37Z","lastTransitionTime":"2025-11-29T04:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.001916 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.001956 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.001966 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.001982 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.001995 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.104753 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.104805 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.104823 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.104847 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.104864 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.208238 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.208295 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.208311 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.208362 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.208380 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.215855 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:38 crc kubenswrapper[4631]: E1129 04:12:38.216043 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.311307 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.311396 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.311418 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.311447 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.311472 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.413985 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.414037 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.414045 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.414059 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.414069 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.517859 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.517915 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.517931 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.517950 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.517966 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.621125 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.621193 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.621211 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.621235 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.621253 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.723931 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.724000 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.724027 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.724056 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.724079 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.826977 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.827045 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.827062 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.827088 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.827107 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.930244 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.930303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.930319 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.930375 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:38 crc kubenswrapper[4631]: I1129 04:12:38.930392 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:38Z","lastTransitionTime":"2025-11-29T04:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.035781 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.035845 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.035870 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.035898 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.035917 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.138262 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.138324 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.138376 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.138396 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.138413 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.216483 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.216551 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:39 crc kubenswrapper[4631]: E1129 04:12:39.216639 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.216725 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:39 crc kubenswrapper[4631]: E1129 04:12:39.216956 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:39 crc kubenswrapper[4631]: E1129 04:12:39.217294 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.240644 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.240689 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.240707 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.240727 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.240743 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.350649 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.350726 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.350744 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.350778 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.351508 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.454952 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.455618 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.455800 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.455946 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.456141 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.559174 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.559231 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.559254 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.559285 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.559307 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.662512 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.662621 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.662650 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.662686 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.662711 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.766101 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.766165 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.766183 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.766212 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.766229 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.869573 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.869638 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.869655 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.869681 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.869699 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.972464 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.972691 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.972829 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.972954 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:39 crc kubenswrapper[4631]: I1129 04:12:39.973076 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:39Z","lastTransitionTime":"2025-11-29T04:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.075678 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.075726 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.075744 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.075766 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.075784 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:40Z","lastTransitionTime":"2025-11-29T04:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.178455 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.178502 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.178540 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.178561 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.178579 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:40Z","lastTransitionTime":"2025-11-29T04:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.216121 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:40 crc kubenswrapper[4631]: E1129 04:12:40.216491 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.281170 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.281278 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.281298 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.281327 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.281378 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:40Z","lastTransitionTime":"2025-11-29T04:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.383662 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.384107 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.384267 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.384470 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.384614 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:40Z","lastTransitionTime":"2025-11-29T04:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.487911 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.487973 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.487990 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.488015 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.488033 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:40Z","lastTransitionTime":"2025-11-29T04:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.591366 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.591426 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.591442 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.591466 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.591508 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:40Z","lastTransitionTime":"2025-11-29T04:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.694635 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.694702 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.694725 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.694755 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.694777 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:40Z","lastTransitionTime":"2025-11-29T04:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.797907 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.798450 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.798899 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.799141 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.799488 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:40Z","lastTransitionTime":"2025-11-29T04:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.903045 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.903097 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.903116 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.903141 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:40 crc kubenswrapper[4631]: I1129 04:12:40.903157 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:40Z","lastTransitionTime":"2025-11-29T04:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.006561 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.006618 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.006679 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.006704 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.006724 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.109434 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.109821 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.109975 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.110186 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.110413 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.213607 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.213951 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.214111 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.214372 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.214530 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.216302 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:41 crc kubenswrapper[4631]: E1129 04:12:41.216526 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.216315 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.216592 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:41 crc kubenswrapper[4631]: E1129 04:12:41.216704 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:41 crc kubenswrapper[4631]: E1129 04:12:41.216842 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.318508 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.318935 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.319087 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.319247 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.319424 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.422370 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.422430 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.422447 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.422472 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.422490 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.525404 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.525679 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.525764 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.525847 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.526001 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.629440 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.629502 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.629525 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.629558 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.629574 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.732843 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.732903 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.732925 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.732950 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.732969 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.836410 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.836825 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.837056 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.837281 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.837526 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.941017 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.941444 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.941612 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.941788 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:41 crc kubenswrapper[4631]: I1129 04:12:41.941950 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:41Z","lastTransitionTime":"2025-11-29T04:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.044984 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.045051 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.045072 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.045099 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.045116 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.147776 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.148188 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.148464 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.148978 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.149544 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.215534 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:42 crc kubenswrapper[4631]: E1129 04:12:42.215797 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.257749 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.257823 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.257842 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.257869 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.257887 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.360996 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.361057 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.361087 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.361103 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.361112 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.464496 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.464556 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.464574 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.464598 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.464615 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.567455 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.567508 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.567525 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.567548 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.567565 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.670768 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.671152 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.671370 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.671597 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.671776 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.774711 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.774800 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.774819 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.774842 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.774859 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.877933 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.878006 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.878019 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.878034 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.878045 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.981060 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.981110 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.981127 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.981148 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:42 crc kubenswrapper[4631]: I1129 04:12:42.981162 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:42Z","lastTransitionTime":"2025-11-29T04:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.084127 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.084572 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.084742 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.084916 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.085102 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:43Z","lastTransitionTime":"2025-11-29T04:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.189111 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.189167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.189185 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.189211 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.189232 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:43Z","lastTransitionTime":"2025-11-29T04:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.216143 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.216230 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:43 crc kubenswrapper[4631]: E1129 04:12:43.217575 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.217701 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:43 crc kubenswrapper[4631]: E1129 04:12:43.217832 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:43 crc kubenswrapper[4631]: E1129 04:12:43.217918 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.236530 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.254598 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.276695 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.292167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.292440 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.292621 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.292806 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.292993 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:43Z","lastTransitionTime":"2025-11-29T04:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.296138 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.319519 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"2025-11-29T04:11:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7\\\\n2025-11-29T04:11:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7 to /host/opt/cni/bin/\\\\n2025-11-29T04:11:34Z [verbose] multus-daemon started\\\\n2025-11-29T04:11:34Z [verbose] Readiness Indicator file check\\\\n2025-11-29T04:12:19Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.344042 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.366267 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.388069 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.395927 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.396308 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.396494 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.396662 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.396810 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:43Z","lastTransitionTime":"2025-11-29T04:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.408303 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.442213 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:35Z\\\",\\\"message\\\":\\\"ent/informers/externalversions/factory.go:141\\\\nI1129 04:12:35.048100 6579 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048155 6579 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048379 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048942 6579 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048102 6579 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:12:35.049190 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:12:35.049225 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:12:35.049276 6579 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1129 04:12:35.049319 6579 factory.go:656] Stopping watch factory\\\\nI1129 04:12:35.049381 6579 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:12:35.049486 6579 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:12:35.049563 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.459257 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.481406 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.499099 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.499167 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.499184 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.499212 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.499229 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:43Z","lastTransitionTime":"2025-11-29T04:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.515867 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"086f29b7-9b42-409a-8591-645d3616320b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6100415a919b68a8cdc6dfbae9d3c391ac6db8e3908f39f789d0d1694a3ddc8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52f43cf26605dac1b088b7580b5447cfab48e84fd32e325e4ef6415215e09661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e827ca3b15395b1539d10999b148ca19dcc72e36e2ce2539e6c66f286ea8148f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62eada265bec693d8dc6f513b807347f2870d5becfb53cdfc0a81c113fcf151b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecbe6fce889ea30339f989f5b40fab5157d4c50a77df9ddb8adcb56223755f73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee7f00ee91110b84305d8aca62531f4ff151324aa48d2d5018b4251db56af872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7f00ee91110b84305d8aca62531f4ff151324aa48d2d5018b4251db56af872\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a688ab04e6e05182db2c2f3bac369094bf7ecf634c117b01efc61fb757f5a938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a688ab04e6e05182db2c2f3bac369094bf7ecf634c117b01efc61fb757f5a938\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9022eec0ea1f259acc9309571973dbc1ea222f0ebf9cda5229aaf33e7ed7b442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9022eec0ea1f259acc9309571973dbc1ea222f0ebf9cda5229aaf33e7ed7b442\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.538365 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.557588 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e
9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.570784 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.600763 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.602431 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.602489 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:43 crc 
kubenswrapper[4631]: I1129 04:12:43.602509 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.602549 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.602570 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:43Z","lastTransitionTime":"2025-11-29T04:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.616497 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.629752 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:43Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.705200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.705774 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.705790 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.705807 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.705818 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:43Z","lastTransitionTime":"2025-11-29T04:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.808797 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.808840 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.808851 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.808869 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.808880 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:43Z","lastTransitionTime":"2025-11-29T04:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.912156 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.912197 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.912211 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.912230 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:43 crc kubenswrapper[4631]: I1129 04:12:43.912243 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:43Z","lastTransitionTime":"2025-11-29T04:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.021310 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.021383 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.021400 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.021421 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.021439 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.124152 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.124204 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.124220 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.124243 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.124260 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.216420 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:44 crc kubenswrapper[4631]: E1129 04:12:44.216644 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.226978 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.227123 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.227150 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.227180 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.227204 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.330858 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.330928 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.330954 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.330983 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.331006 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.434015 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.434107 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.434125 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.434181 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.434199 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.537055 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.537123 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.537146 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.537173 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.537197 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.640749 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.640809 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.640833 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.640862 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.640883 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.743548 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.743654 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.743677 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.743707 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.743728 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.845960 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.846025 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.846044 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.846069 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.846086 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.950038 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.950100 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.950121 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.950148 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:44 crc kubenswrapper[4631]: I1129 04:12:44.950166 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:44Z","lastTransitionTime":"2025-11-29T04:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.052260 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.052314 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.052362 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.052386 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.052404 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:45Z","lastTransitionTime":"2025-11-29T04:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.155707 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.155804 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.155864 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.155889 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.155950 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:45Z","lastTransitionTime":"2025-11-29T04:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.216313 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.216414 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.216375 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:45 crc kubenswrapper[4631]: E1129 04:12:45.216550 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:45 crc kubenswrapper[4631]: E1129 04:12:45.216674 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:45 crc kubenswrapper[4631]: E1129 04:12:45.216833 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.258630 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.258694 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.258716 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.258744 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.258764 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:45Z","lastTransitionTime":"2025-11-29T04:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.383898 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.383971 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.383996 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.384025 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.384080 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:45Z","lastTransitionTime":"2025-11-29T04:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.487518 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.487634 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.487661 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.487689 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.487711 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:45Z","lastTransitionTime":"2025-11-29T04:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.590325 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.590436 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.590461 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.590486 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.590503 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:45Z","lastTransitionTime":"2025-11-29T04:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.693732 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.693781 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.693798 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.693822 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.693839 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:45Z","lastTransitionTime":"2025-11-29T04:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.797753 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.797802 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.797819 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.797842 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.797858 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:45Z","lastTransitionTime":"2025-11-29T04:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.900452 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.900530 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.900562 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.900592 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:45 crc kubenswrapper[4631]: I1129 04:12:45.900615 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:45Z","lastTransitionTime":"2025-11-29T04:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.003646 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.003704 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.003723 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.003746 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.003763 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:46Z","lastTransitionTime":"2025-11-29T04:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.291743 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:46 crc kubenswrapper[4631]: E1129 04:12:46.291946 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.293517 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.293612 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.293677 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.293766 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.293830 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:46Z","lastTransitionTime":"2025-11-29T04:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.397033 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.397193 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.397224 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.397324 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.397370 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:46Z","lastTransitionTime":"2025-11-29T04:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.501349 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.501384 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.501392 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.501406 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.501415 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:46Z","lastTransitionTime":"2025-11-29T04:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.604229 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.604287 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.604304 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.604357 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.604375 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:46Z","lastTransitionTime":"2025-11-29T04:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.707476 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.707532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.707550 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.707575 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.707593 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:46Z","lastTransitionTime":"2025-11-29T04:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.811053 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.811110 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.811127 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.811151 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.811168 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:46Z","lastTransitionTime":"2025-11-29T04:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.914159 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.914243 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.914272 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.914302 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:46 crc kubenswrapper[4631]: I1129 04:12:46.914462 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:46Z","lastTransitionTime":"2025-11-29T04:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.017949 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.018009 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.018032 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.018064 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.018088 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.121623 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.121683 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.121748 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.121776 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.121795 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.216795 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.216902 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.216816 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:47 crc kubenswrapper[4631]: E1129 04:12:47.217025 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:47 crc kubenswrapper[4631]: E1129 04:12:47.217170 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:47 crc kubenswrapper[4631]: E1129 04:12:47.217468 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.224145 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.224185 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.224203 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.224225 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.224242 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.327670 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.327741 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.327767 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.327793 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.327811 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.431241 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.431293 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.431311 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.431416 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.431435 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.534865 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.534932 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.534956 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.534990 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.535012 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.638323 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.638445 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.638473 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.638503 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.638525 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.742087 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.742149 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.742171 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.742199 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.742224 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.838198 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.838265 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.838287 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.838315 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.838370 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: E1129 04:12:47.861564 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.866532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.866584 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
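[editor's note] The status patch itself is now being rejected for a second, independent reason: the API server cannot call the node.network-node-identity.openshift.io webhook on 127.0.0.1:9743 because the webhook's serving certificate expired on 2025-08-24, long before the log's current time of 2025-11-29. A quick way to confirm an expired serving certificate like this one, sketched in Python (assumes the webhook is still listening on 127.0.0.1:9743 and that the third-party cryptography package is installed):

    import ssl
    from cryptography import x509  # third-party package, assumed available

    # Endpoint taken from the webhook URL in the error above.
    HOST, PORT = "127.0.0.1", 9743

    # get_server_certificate() does not verify the peer certificate, so it
    # can still fetch one that would fail normal TLS validation.
    pem = ssl.get_server_certificate((HOST, PORT))
    cert = x509.load_pem_x509_certificate(pem.encode("ascii"))

    print("notBefore:", cert.not_valid_before)
    print("notAfter: ", cert.not_valid_after)
    # For the failure above, notAfter (2025-08-24T17:21:41Z) is earlier
    # than the current time (2025-11-29T04:12:47Z), matching the x509 error.

Until that certificate is rotated, every node-status patch will presumably fail the same way regardless of the CNI problem, which is consistent with the repeated retries that follow.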
event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.866603 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.866629 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.866646 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: E1129 04:12:47.888098 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.894490 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.894551 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.894572 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.894597 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.894614 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: E1129 04:12:47.915007 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.925023 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.925078 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.925095 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.925178 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.925208 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: E1129 04:12:47.945796 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.951404 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.951443 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.951460 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.951483 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.951501 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:47 crc kubenswrapper[4631]: E1129 04:12:47.972882 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:47Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:47 crc kubenswrapper[4631]: E1129 04:12:47.973127 4631 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.975241 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.975285 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.975302 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.975324 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:47 crc kubenswrapper[4631]: I1129 04:12:47.975370 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:47Z","lastTransitionTime":"2025-11-29T04:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.078772 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.078848 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.078868 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.078898 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.078922 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:48Z","lastTransitionTime":"2025-11-29T04:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.182236 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.182300 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.182318 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.182366 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.182383 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:48Z","lastTransitionTime":"2025-11-29T04:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.216103 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:48 crc kubenswrapper[4631]: E1129 04:12:48.216270 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.284879 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.284931 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.284948 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.284973 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.284992 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:48Z","lastTransitionTime":"2025-11-29T04:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.389560 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.389615 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.389633 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.389659 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.389677 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:48Z","lastTransitionTime":"2025-11-29T04:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.492806 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.492868 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.492888 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.492912 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.492928 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:48Z","lastTransitionTime":"2025-11-29T04:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.596002 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.596056 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.596072 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.596096 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.596112 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:48Z","lastTransitionTime":"2025-11-29T04:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.698974 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.699031 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.699047 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.699071 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.699087 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:48Z","lastTransitionTime":"2025-11-29T04:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.801970 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.802059 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.802077 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.802100 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.802116 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:48Z","lastTransitionTime":"2025-11-29T04:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.905716 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.905775 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.905793 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.905817 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:48 crc kubenswrapper[4631]: I1129 04:12:48.905835 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:48Z","lastTransitionTime":"2025-11-29T04:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:49 crc kubenswrapper[4631]: I1129 04:12:49.009046 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:49 crc kubenswrapper[4631]: I1129 04:12:49.009094 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:49 crc kubenswrapper[4631]: I1129 04:12:49.009112 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:49 crc kubenswrapper[4631]: I1129 04:12:49.009134 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:49 crc kubenswrapper[4631]: I1129 04:12:49.009151 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:49Z","lastTransitionTime":"2025-11-29T04:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 29 04:12:49 crc kubenswrapper[4631]: I1129 04:12:49.215466 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:49 crc kubenswrapper[4631]: E1129 04:12:49.215678 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:49 crc kubenswrapper[4631]: I1129 04:12:49.215731 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:49 crc kubenswrapper[4631]: I1129 04:12:49.215786 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:49 crc kubenswrapper[4631]: E1129 04:12:49.215902 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:49 crc kubenswrapper[4631]: E1129 04:12:49.216071 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:50 crc kubenswrapper[4631]: I1129 04:12:50.216220 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:12:50 crc kubenswrapper[4631]: E1129 04:12:50.217378 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6"
Nov 29 04:12:50 crc kubenswrapper[4631]: I1129 04:12:50.217842 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"
Nov 29 04:12:50 crc kubenswrapper[4631]: E1129 04:12:50.218138 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a"
Nov 29 04:12:50 crc kubenswrapper[4631]: I1129 04:12:50.737675 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:12:50 crc kubenswrapper[4631]: E1129 04:12:50.737902 4631 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 29 04:12:50 crc kubenswrapper[4631]: E1129 04:12:50.738004 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs podName:c6c5bb91-f03c-4672-bc61-69a68b8c89d6 nodeName:}" failed. No retries permitted until 2025-11-29 04:13:54.737981116 +0000 UTC m=+161.802484660 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs") pod "network-metrics-daemon-b6vgh" (UID: "c6c5bb91-f03c-4672-bc61-69a68b8c89d6") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 29 04:12:51 crc kubenswrapper[4631]: I1129 04:12:51.216194 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:51 crc kubenswrapper[4631]: I1129 04:12:51.216229 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:51 crc kubenswrapper[4631]: I1129 04:12:51.216446 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:51 crc kubenswrapper[4631]: E1129 04:12:51.216595 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:51 crc kubenswrapper[4631]: E1129 04:12:51.216925 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:51 crc kubenswrapper[4631]: E1129 04:12:51.217055 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:52 crc kubenswrapper[4631]: I1129 04:12:52.215691 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:12:52 crc kubenswrapper[4631]: E1129 04:12:52.216170 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.215433 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.215775 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:53 crc kubenswrapper[4631]: E1129 04:12:53.215915 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:53 crc kubenswrapper[4631]: E1129 04:12:53.215772 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.216147 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:53 crc kubenswrapper[4631]: E1129 04:12:53.216368 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.237120 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6a28e3b303b73c6b5542897ae4e467ca38ece669833d0953ef89cc47b37dfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.252733 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.269461 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a812429acb73d56ebe1f5baa5c57765605d7b3fa4fb07dd91860c57439cf5e84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d1825fae3181ed51bcaed770352e1c95c3cbf7c8df9b36dda4ee2ba4248f855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.292344 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cda25410-78a0-47a1-894f-621a855bd64a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:35Z\\\",\\\"message\\\":\\\"ent/informers/externalversions/factory.go:141\\\\nI1129 04:12:35.048100 6579 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048155 6579 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048379 6579 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048942 6579 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1129 04:12:35.048102 6579 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1129 04:12:35.049190 6579 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1129 04:12:35.049225 6579 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1129 04:12:35.049276 6579 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1129 04:12:35.049319 6579 factory.go:656] Stopping watch factory\\\\nI1129 04:12:35.049381 6579 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1129 04:12:35.049486 6579 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1129 04:12:35.049563 6579 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:12:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ffhfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2npl6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.311439 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-jgxpj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d99f974e-ba9c-4600-81c3-42c629af0c1b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e00a8e110ba81f431d95d55afc790950a80d875f636a1b1ae0a7b89ac4b3435\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zg8w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-jgxpj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.323254 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.334900 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0973ce34-1f3e-4c8a-a7a7-5c6af50c105c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1129 04:11:26.443740 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1129 04:11:26.452370 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4021631713/tls.crt::/tmp/serving-cert-4021631713/tls.key\\\\\\\"\\\\nI1129 04:11:31.791479 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1129 04:11:31.794786 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1129 04:11:31.794801 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1129 04:11:31.794822 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1129 04:11:31.794827 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1129 04:11:31.816163 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1129 04:11:31.816190 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816195 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1129 04:11:31.816202 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1129 04:11:31.816205 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1129 04:11:31.816208 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1129 04:11:31.816216 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1129 04:11:31.816360 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1129 04:11:31.819158 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.348862 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c36a691-f335-4504-94fa-5c4a440395e8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1d2c0bc030c720a2d95609a658185a2a53314011f484776bac2c261ed0f2097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc5abb7a99d364bb4a2c7936aa014b27a24878d30ba6f76600cf3a2c9c9eacc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6498acc003211ca6f201b49337de2fa6ee0d4445a0d87116630f797cc9dd5663\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.349991 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.350027 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.350040 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.350058 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.350073 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:53Z","lastTransitionTime":"2025-11-29T04:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.363982 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd983e9b-92e6-41c8-ae19-4f28c141ba51\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8d4891fba7090a9ea12fa27ea8dc917ea7e200c86ec5b7c4726414d96044b77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6ef63251741e9b4e982009d2a022115961d3a0345d902956975cd9d7709598f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h4vzb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-77lq2\": Internal error occurred: failed calling webhook
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.379295 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:46Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bk256\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:46Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-b6vgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc 
kubenswrapper[4631]: I1129 04:12:53.411459 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"086f29b7-9b42-409a-8591-645d3616320b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6100415a919b68a8cdc6dfbae9d3c391ac6db8e3908f39f789d0d1694a3ddc8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52f43cf26605dac1b088b7580b5447cfab48e84fd32e325e4ef6415215e09661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e827ca3b15395b1539d10999b148ca19dcc72e36e2ce2539e6c66f286ea8148f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62eada265bec693d8dc6f513b807347f2870d5becfb53cdfc0a81c113fcf151b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecbe6fce889ea30339f989f5b40fab5157d4c50a77df9ddb8adcb56223755f73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ee7f00ee91110b84305d8aca62531f4ff151324aa48d2d5018b4251db56af872\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7f00ee91110b84305d8aca62531f4ff151324aa48d2d5018b4251db56af872\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a688ab04e6e05182db2c2f3bac369094bf7ecf634c117b01efc61fb757f5a938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a688ab04e6e05182db2c2f3bac369094bf7ecf634c117b01efc61fb757f5a938\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Co
mpleted\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9022eec0ea1f259acc9309571973dbc1ea222f0ebf9cda5229aaf33e7ed7b442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9022eec0ea1f259acc9309571973dbc1ea222f0ebf9cda5229aaf33e7ed7b442\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.426923 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.438743 4631 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.453122 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.453199 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.453216 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.453272 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.453290 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:53Z","lastTransitionTime":"2025-11-29T04:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.461056 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.475537 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.492695 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.510274 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3e768e90a8a8bb0f37dbebea8d88b9f3a3cd771a90bd065967939160de74719\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.530319 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-pbk6b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f871e13-bbe2-4104-8f40-70e695653fef\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-29T04:12:19Z\\\",\\\"message\\\":\\\"2025-11-29T04:11:34+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7\\\\n2025-11-29T04:11:34+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d6f9a5a2-ce16-43eb-a96a-7e69fbb553e7 to /host/opt/cni/bin/\\\\n2025-11-29T04:11:34Z [verbose] multus-daemon started\\\\n2025-11-29T04:11:34Z [verbose] Readiness Indicator file check\\\\n2025-11-29T04:12:19Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x6w8m\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-pbk6b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.550879 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b2937c2f-00bd-4224-a77c-ef76ab93890f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5e1d194c44dde00347b893c84ff645ae8470eb6da5025e64bab65d4c5b27a00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://847ab382fe12e06782bf4b9fb6dabf6f8261f1d9f75ba325ed550ffcc58472d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f003ef969f372dfb3c369d13c9d017c447c6bd836408fffff34aade9f333f88f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7caf8b755167225a1d776742bcb29d5cb8ed8ba8a61d1f4e865996449943480b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:53Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.555981 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.556067 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.556087 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.556110 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.556157 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:53Z","lastTransitionTime":"2025-11-29T04:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.658658 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.658728 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.658745 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.658794 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.658812 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:53Z","lastTransitionTime":"2025-11-29T04:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.762005 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.762087 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.762105 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.762128 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.762147 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:53Z","lastTransitionTime":"2025-11-29T04:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.864753 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.864828 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.864855 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.864888 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.864911 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:53Z","lastTransitionTime":"2025-11-29T04:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.967784 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.967848 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.967872 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.967901 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:53 crc kubenswrapper[4631]: I1129 04:12:53.967923 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:53Z","lastTransitionTime":"2025-11-29T04:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.071280 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.071358 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.071377 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.071401 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.071416 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:54Z","lastTransitionTime":"2025-11-29T04:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.174666 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.174718 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.174741 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.174766 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.174787 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:54Z","lastTransitionTime":"2025-11-29T04:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.216130 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:12:54 crc kubenswrapper[4631]: E1129 04:12:54.216315 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.278424 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.278490 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.278511 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.278545 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.278567 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:54Z","lastTransitionTime":"2025-11-29T04:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.382237 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.382738 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.382964 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.383190 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.383479 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:54Z","lastTransitionTime":"2025-11-29T04:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.486727 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.486785 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.486807 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.486832 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.486852 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:54Z","lastTransitionTime":"2025-11-29T04:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.589838 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.589893 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.589911 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.589960 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.589977 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:54Z","lastTransitionTime":"2025-11-29T04:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.693224 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.693289 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.693308 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.693366 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.693394 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:54Z","lastTransitionTime":"2025-11-29T04:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.796427 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.796498 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.796519 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.796548 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.796570 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:54Z","lastTransitionTime":"2025-11-29T04:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.898726 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.898800 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.898822 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.898851 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:54 crc kubenswrapper[4631]: I1129 04:12:54.898873 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:54Z","lastTransitionTime":"2025-11-29T04:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.001249 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.001312 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.001358 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.001390 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.001733 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.104533 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.104584 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.104597 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.104615 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.104626 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.207067 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.207880 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.208063 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.208212 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.208410 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.216612 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.216836 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.216638 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:55 crc kubenswrapper[4631]: E1129 04:12:55.217050 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:55 crc kubenswrapper[4631]: E1129 04:12:55.217207 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:55 crc kubenswrapper[4631]: E1129 04:12:55.217406 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.311501 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.311551 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.311567 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.311589 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.311606 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.414911 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.414961 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.414977 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.415001 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.415017 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.518453 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.518509 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.518532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.518556 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.518576 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.621637 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.621699 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.621721 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.621786 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.621807 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.725017 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.725073 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.725090 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.725109 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.725125 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.827808 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.827868 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.827935 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.827968 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.827988 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.930801 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.930868 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.930887 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.930913 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:55 crc kubenswrapper[4631]: I1129 04:12:55.930932 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:55Z","lastTransitionTime":"2025-11-29T04:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.034270 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.034320 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.034373 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.034398 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.034415 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.137054 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.137110 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.137127 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.137151 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.137170 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.216211 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:12:56 crc kubenswrapper[4631]: E1129 04:12:56.216495 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.239584 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.239637 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.239653 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.239676 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.239721 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.343200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.343274 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.343300 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.343362 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.343388 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.446179 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.446227 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.446244 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.446267 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.446284 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.549304 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.549387 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.549406 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.549430 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.549449 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.652115 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.652173 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.652190 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.652214 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.652232 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.755597 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.755652 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.755669 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.755692 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.755710 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.858078 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.858124 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.858140 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.858162 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.858181 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.961400 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.961504 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.961526 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.961551 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:56 crc kubenswrapper[4631]: I1129 04:12:56.961569 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:56Z","lastTransitionTime":"2025-11-29T04:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.064415 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.064472 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.064491 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.064512 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.064528 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.167380 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.167432 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.167453 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.167482 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.167505 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.216456 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 29 04:12:57 crc kubenswrapper[4631]: E1129 04:12:57.216642 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.216730 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.216746 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 29 04:12:57 crc kubenswrapper[4631]: E1129 04:12:57.216925 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 29 04:12:57 crc kubenswrapper[4631]: E1129 04:12:57.216996 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.270189 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.270244 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.270266 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.270294 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.270319 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.372754 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.372849 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.372877 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.372909 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.372935 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.475699 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.475778 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.475802 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.475831 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.475853 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.578970 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.579034 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.579063 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.579093 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.579117 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.682301 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.682391 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.682409 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.682431 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.682448 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.784927 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.784976 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.784988 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.785006 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.785019 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.893024 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.893085 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.893398 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.893439 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.893458 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.996194 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.996265 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.996283 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.996308 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:57 crc kubenswrapper[4631]: I1129 04:12:57.996326 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:57Z","lastTransitionTime":"2025-11-29T04:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.098953 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.099014 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.099038 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.099068 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.099090 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.201876 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.201930 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.201947 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.201970 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.201987 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.209291 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.209398 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.209466 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.209497 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.209520 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.215653 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:12:58 crc kubenswrapper[4631]: E1129 04:12:58.215872 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6"
Nov 29 04:12:58 crc kubenswrapper[4631]: E1129 04:12:58.230814 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed2
1\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:58Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.235601 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: 
I1129 04:12:58.235646 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.235664 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.235686 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.235704 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: E1129 04:12:58.254636 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:58Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.259362 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.259443 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.259455 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.259473 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.259484 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: E1129 04:12:58.276412 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:58Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.280545 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.280585 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.280594 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.280610 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.280620 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: E1129 04:12:58.296624 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:58Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.301223 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.301265 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.301281 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.301303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.301319 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: E1129 04:12:58.318484 4631 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-29T04:12:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"26b847b6-4c18-4480-bfc6-a52029f99f22\\\",\\\"systemUUID\\\":\\\"06b81a2a-46c7-4ed7-b163-1df3ee4c2427\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:12:58Z is after 2025-08-24T17:21:41Z" Nov 29 04:12:58 crc kubenswrapper[4631]: E1129 04:12:58.318811 4631 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.320941 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.321004 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.321025 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.321052 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.321070 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.426660 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.426740 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.426752 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.426777 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.426791 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.531156 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.531230 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.531259 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.531286 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.531303 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.634556 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.634663 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.634692 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.634721 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.634740 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.737666 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.737723 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.737740 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.737766 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.737784 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.840872 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.840947 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.840964 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.840992 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.841009 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.944176 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.944234 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.944250 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.944276 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:58 crc kubenswrapper[4631]: I1129 04:12:58.944296 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:58Z","lastTransitionTime":"2025-11-29T04:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.047121 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.047276 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.047293 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.047318 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.047377 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.149965 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.150036 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.150054 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.150086 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.150105 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.215779 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.215922 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.215950 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:12:59 crc kubenswrapper[4631]: E1129 04:12:59.216109 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:12:59 crc kubenswrapper[4631]: E1129 04:12:59.216270 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:12:59 crc kubenswrapper[4631]: E1129 04:12:59.216385 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.252787 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.252845 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.252863 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.252887 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.252904 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.356052 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.356118 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.356137 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.356161 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.356179 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.459816 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.459872 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.459890 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.459914 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.459933 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.562735 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.562779 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.562797 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.562821 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.562840 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.666021 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.666066 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.666082 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.666106 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.666124 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.769279 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.769423 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.769452 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.769481 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.769505 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.871978 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.872060 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.872087 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.872117 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.872145 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.975079 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.975136 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.975152 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.975175 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:12:59 crc kubenswrapper[4631]: I1129 04:12:59.975192 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:12:59Z","lastTransitionTime":"2025-11-29T04:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.078045 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.078107 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.078124 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.078148 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.078210 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:00Z","lastTransitionTime":"2025-11-29T04:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.181447 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.181515 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.181537 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.181565 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.181611 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:00Z","lastTransitionTime":"2025-11-29T04:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.215633 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:00 crc kubenswrapper[4631]: E1129 04:13:00.215826 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.284259 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.284319 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.284382 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.284408 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.284429 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:00Z","lastTransitionTime":"2025-11-29T04:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.387854 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.387920 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.387944 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.387973 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.387995 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:00Z","lastTransitionTime":"2025-11-29T04:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.535773 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.536248 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.536487 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.536687 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.536892 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:00Z","lastTransitionTime":"2025-11-29T04:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.641199 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.641250 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.641267 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.641291 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.641307 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:00Z","lastTransitionTime":"2025-11-29T04:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.744316 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.744431 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.744453 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.744482 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.744505 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:00Z","lastTransitionTime":"2025-11-29T04:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.847617 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.847675 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.847692 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.847717 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.847733 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:00Z","lastTransitionTime":"2025-11-29T04:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.950975 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.951068 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.951087 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.951142 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:00 crc kubenswrapper[4631]: I1129 04:13:00.951161 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:00Z","lastTransitionTime":"2025-11-29T04:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.054063 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.054117 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.054133 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.054156 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.054173 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.156927 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.156977 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.156995 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.157019 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.157037 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.215870 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:01 crc kubenswrapper[4631]: E1129 04:13:01.216043 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.216315 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:01 crc kubenswrapper[4631]: E1129 04:13:01.216452 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.216620 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:01 crc kubenswrapper[4631]: E1129 04:13:01.216766 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.260102 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.260157 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.260175 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.260197 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.260215 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.363658 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.363702 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.363726 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.363750 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.363766 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.466468 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.466545 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.466567 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.466595 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.466619 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.569994 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.570054 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.570072 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.570098 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.570116 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.678494 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.678551 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.678570 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.678592 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.678610 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.781482 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.781537 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.781555 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.781579 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.781596 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.884280 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.884364 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.884382 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.884406 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.884423 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.987086 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.987193 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.987499 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.987549 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:01 crc kubenswrapper[4631]: I1129 04:13:01.987569 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:01Z","lastTransitionTime":"2025-11-29T04:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.090729 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.090789 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.090806 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.090875 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.090893 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:02Z","lastTransitionTime":"2025-11-29T04:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.193714 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.193776 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.193794 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.193818 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.193837 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:02Z","lastTransitionTime":"2025-11-29T04:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.216066 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:02 crc kubenswrapper[4631]: E1129 04:13:02.216280 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.217436 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:13:02 crc kubenswrapper[4631]: E1129 04:13:02.217716 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.297805 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.297869 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.297886 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.297910 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.297928 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:02Z","lastTransitionTime":"2025-11-29T04:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.400515 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.400598 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.400653 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.400684 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.400702 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:02Z","lastTransitionTime":"2025-11-29T04:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.503629 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.503944 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.504141 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.504289 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.504461 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:02Z","lastTransitionTime":"2025-11-29T04:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.608485 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.608555 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.608579 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.608612 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.608634 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:02Z","lastTransitionTime":"2025-11-29T04:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.711824 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.712240 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.712424 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.712570 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.712707 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:02Z","lastTransitionTime":"2025-11-29T04:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.816582 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.816651 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.816670 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.816697 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.816717 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:02Z","lastTransitionTime":"2025-11-29T04:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.920100 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.920465 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.920659 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.920815 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:02 crc kubenswrapper[4631]: I1129 04:13:02.920956 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:02Z","lastTransitionTime":"2025-11-29T04:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.024519 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.024582 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.024601 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.024626 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.024644 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.127677 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.127757 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.127784 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.127817 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.127839 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.216051 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:03 crc kubenswrapper[4631]: E1129 04:13:03.216251 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.216313 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.216635 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:03 crc kubenswrapper[4631]: E1129 04:13:03.217194 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:03 crc kubenswrapper[4631]: E1129 04:13:03.217387 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.230524 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.230587 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.230612 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.230636 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.230654 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.238546 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cddaf389-3216-4be7-a91d-8bed4a7bb9e9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23a5e6157a63aff7b0f73eb3402547a956a9927d838b91effda926cf0044bf3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557\\\",\\\"image\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fd5s\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bmtd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:13:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.255125 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5fvhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c91a384b-14a6-429c-a5f3-81f62e36d97d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca06e40670f30cfa82b83dd980ce95bfdad5eedb25189a8a07c0ee65068746d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4zplv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5fvhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:13:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.278390 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83a25be5-2626-40c4-9f04-e74d576e22d7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b872cd7964fbbb4b7ee64283bdc69a5f3e2162cfe505e91bce46dec1c95ec393\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ccfc7d5e47801d676990ac3e9804a1110fefeb7fdf25f82efa767a766dc901c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entry
point\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://870b499179fbd485408623fee2ce674a26c71972d4d3c79c5a7c97ebbf36e7fc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b1178d4bf6eaa82fe0d710b8e677c55d51e7abaa16b0098295d3e31fee2aaf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0
c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bedc1c4f36e32088eff08ada9bb60252515f6dc3301bada7991c7374e2b2135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bdaa8884628fa9ea8c24642c36d8a884500c3c8badb5a0019fe55bd2035534e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a77263a8111a33b14701b8633c67f21be636715bfea9fdee62972fb732c43619\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8c5wb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"20
25-11-29T04:11:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kcgbc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:13:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.294566 4631 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"997c5f8c-f979-45a7-a56f-2e451db56e01\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-29T04:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89dc336c93f8ca02f015a6bebe96930a1a816c778032dca63b46eb35821c97be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3248260b9a86f25e65351c53f8868dc9be0684bce5faaa8385a3fb812d87fd3c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-29T04:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-29T04:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-29T04:11:13Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-29T04:13:03Z is after 2025-08-24T17:21:41Z" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.337844 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.337922 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.337950 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.337983 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.338001 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.411802 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=58.411770562 podStartE2EDuration="58.411770562s" podCreationTimestamp="2025-11-29 04:12:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:03.411735652 +0000 UTC m=+110.476239206" watchObservedRunningTime="2025-11-29 04:13:03.411770562 +0000 UTC m=+110.476274126" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.412090 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-pbk6b" podStartSLOduration=92.412074 podStartE2EDuration="1m32.412074s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:03.383457344 +0000 UTC m=+110.447960918" watchObservedRunningTime="2025-11-29 04:13:03.412074 +0000 UTC m=+110.476577554" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.441556 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.441633 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.441693 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.441707 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.441716 4631 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.544593 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.544643 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.544657 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.544674 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.544689 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.553843 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-jgxpj" podStartSLOduration=92.553826065 podStartE2EDuration="1m32.553826065s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:03.539534013 +0000 UTC m=+110.604037537" watchObservedRunningTime="2025-11-29 04:13:03.553826065 +0000 UTC m=+110.618329589" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.577258 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=91.577237403 podStartE2EDuration="1m31.577237403s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:03.577085339 +0000 UTC m=+110.641588893" watchObservedRunningTime="2025-11-29 04:13:03.577237403 +0000 UTC m=+110.641740927" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.616013 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=91.615996158 podStartE2EDuration="1m31.615996158s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:03.593864493 +0000 UTC m=+110.658368037" watchObservedRunningTime="2025-11-29 04:13:03.615996158 +0000 UTC m=+110.680499682" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.630189 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-77lq2" podStartSLOduration=91.630161778 
podStartE2EDuration="1m31.630161778s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:03.615343402 +0000 UTC m=+110.679846916" watchObservedRunningTime="2025-11-29 04:13:03.630161778 +0000 UTC m=+110.694665332" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.647563 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.647649 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.647669 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.647694 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.647713 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.653351 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=27.653315889 podStartE2EDuration="27.653315889s" podCreationTimestamp="2025-11-29 04:12:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:03.652891398 +0000 UTC m=+110.717394952" watchObservedRunningTime="2025-11-29 04:13:03.653315889 +0000 UTC m=+110.717819403" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.750539 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.750609 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.750631 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.750658 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.750675 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.852665 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.852708 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.852724 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.852747 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.852763 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.955092 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.955131 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.955142 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.955158 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:03 crc kubenswrapper[4631]: I1129 04:13:03.955168 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:03Z","lastTransitionTime":"2025-11-29T04:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.057653 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.057900 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.058105 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.058194 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.058287 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.161370 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.161399 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.161407 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.161422 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.161432 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.216601 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:04 crc kubenswrapper[4631]: E1129 04:13:04.216790 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.263685 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.263739 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.263758 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.263781 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.263799 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.367168 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.367558 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.367723 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.367916 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.368091 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.471156 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.471215 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.471227 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.471246 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.471259 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.573995 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.574026 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.574034 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.574047 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.574055 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.677196 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.677246 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.677299 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.677356 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.677375 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.780846 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.780904 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.780925 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.780954 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.780975 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.883473 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.883532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.883548 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.883573 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.883595 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.987115 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.987175 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.987197 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.987219 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:04 crc kubenswrapper[4631]: I1129 04:13:04.987236 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:04Z","lastTransitionTime":"2025-11-29T04:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.090193 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.090256 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.090284 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.090314 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.090366 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:05Z","lastTransitionTime":"2025-11-29T04:13:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.193236 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.193289 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.193306 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.193356 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.193376 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:05Z","lastTransitionTime":"2025-11-29T04:13:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.215999 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.215995 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:05 crc kubenswrapper[4631]: E1129 04:13:05.216178 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.216254 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:05 crc kubenswrapper[4631]: E1129 04:13:05.216453 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:05 crc kubenswrapper[4631]: E1129 04:13:05.216610 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.296621 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.296676 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.296693 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.296718 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.296735 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:05Z","lastTransitionTime":"2025-11-29T04:13:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.400119 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.400176 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.400192 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.400214 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.400233 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:05Z","lastTransitionTime":"2025-11-29T04:13:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.503849 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.503909 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.503926 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.503949 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.503966 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:05Z","lastTransitionTime":"2025-11-29T04:13:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.607181 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.607251 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.607274 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.607303 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.607323 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:05Z","lastTransitionTime":"2025-11-29T04:13:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.710039 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.710101 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.710118 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.710142 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.710159 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:05Z","lastTransitionTime":"2025-11-29T04:13:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.813227 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.813316 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.813387 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.813423 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.813446 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:05Z","lastTransitionTime":"2025-11-29T04:13:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.881548 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/1.log" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.882564 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/0.log" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.882678 4631 generic.go:334] "Generic (PLEG): container finished" podID="7f871e13-bbe2-4104-8f40-70e695653fef" containerID="73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614" exitCode=1 Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.882733 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-pbk6b" event={"ID":"7f871e13-bbe2-4104-8f40-70e695653fef","Type":"ContainerDied","Data":"73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.882791 4631 scope.go:117] "RemoveContainer" containerID="323e49056014c7e466e890de2da6db73731d4d3d58d8c433d170c33d203db45e" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.884017 4631 scope.go:117] "RemoveContainer" containerID="73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614" Nov 29 04:13:05 crc kubenswrapper[4631]: E1129 04:13:05.884843 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-pbk6b_openshift-multus(7f871e13-bbe2-4104-8f40-70e695653fef)\"" pod="openshift-multus/multus-pbk6b" podUID="7f871e13-bbe2-4104-8f40-70e695653fef" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.916955 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.917015 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.917038 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.917066 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.917083 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:05Z","lastTransitionTime":"2025-11-29T04:13:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.934478 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=46.934452485 podStartE2EDuration="46.934452485s" podCreationTimestamp="2025-11-29 04:12:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:05.905155383 +0000 UTC m=+112.969658927" watchObservedRunningTime="2025-11-29 04:13:05.934452485 +0000 UTC m=+112.998956039" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.966690 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-kcgbc" podStartSLOduration=94.96665619 podStartE2EDuration="1m34.96665619s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:05.965894301 +0000 UTC m=+113.030397905" watchObservedRunningTime="2025-11-29 04:13:05.96665619 +0000 UTC m=+113.031159744" Nov 29 04:13:05 crc kubenswrapper[4631]: I1129 04:13:05.989503 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podStartSLOduration=94.989479772 podStartE2EDuration="1m34.989479772s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:05.988620171 +0000 UTC m=+113.053123715" watchObservedRunningTime="2025-11-29 04:13:05.989479772 +0000 UTC m=+113.053983326" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.004200 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-5fvhl" podStartSLOduration=95.004177175 podStartE2EDuration="1m35.004177175s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:06.00319259 +0000 UTC m=+113.067696144" watchObservedRunningTime="2025-11-29 04:13:06.004177175 +0000 UTC m=+113.068680729" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.021008 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.021118 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.021139 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.021200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.021222 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.124370 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.124436 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.124458 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.124488 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.124508 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.216273 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:06 crc kubenswrapper[4631]: E1129 04:13:06.216491 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.227926 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.227977 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.227994 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.228017 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.228033 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.330898 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.330957 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.330973 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.330997 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.331013 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.434192 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.434258 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.434277 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.434302 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.434321 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.536918 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.537996 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.538148 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.538290 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.538503 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.641254 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.641292 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.641308 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.641367 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.641413 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.746804 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.746861 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.746878 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.746902 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.746921 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.850532 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.850590 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.850610 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.850633 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.850648 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.888717 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/1.log" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.954200 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.954262 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.954280 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.954304 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:06 crc kubenswrapper[4631]: I1129 04:13:06.954325 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:06Z","lastTransitionTime":"2025-11-29T04:13:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.058268 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.058349 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.058369 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.058397 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.058428 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.161641 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.161732 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.161749 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.161778 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.161802 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.216489 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.216591 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.216664 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:07 crc kubenswrapper[4631]: E1129 04:13:07.216827 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:07 crc kubenswrapper[4631]: E1129 04:13:07.216956 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:07 crc kubenswrapper[4631]: E1129 04:13:07.217096 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.264436 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.264550 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.264572 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.264595 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.264614 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.367977 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.368030 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.368051 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.368073 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.368092 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.471387 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.471456 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.471479 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.471511 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.471528 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.574448 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.574497 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.574514 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.574535 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.574552 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.677567 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.677616 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.677633 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.677655 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.677671 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.780458 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.780508 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.780524 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.780547 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.780565 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.884428 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.884479 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.884496 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.884521 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.884538 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.987687 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.987766 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.987799 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.987826 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:07 crc kubenswrapper[4631]: I1129 04:13:07.987844 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:07Z","lastTransitionTime":"2025-11-29T04:13:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.090536 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.090572 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.090584 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.090599 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.090609 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:08Z","lastTransitionTime":"2025-11-29T04:13:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.192999 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.193055 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.193072 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.193095 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.193111 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:08Z","lastTransitionTime":"2025-11-29T04:13:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.215681 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:08 crc kubenswrapper[4631]: E1129 04:13:08.215837 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.296474 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.296531 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.296549 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.296573 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.296592 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:08Z","lastTransitionTime":"2025-11-29T04:13:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.346863 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.346913 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.346930 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.346953 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.346970 4631 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-29T04:13:08Z","lastTransitionTime":"2025-11-29T04:13:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.420820 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth"] Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.421370 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.423408 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.424555 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.424775 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.424647 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.564504 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1b703165-9ec0-44d2-bfb2-b3715452178f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.564857 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b703165-9ec0-44d2-bfb2-b3715452178f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.565039 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1b703165-9ec0-44d2-bfb2-b3715452178f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.565250 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b703165-9ec0-44d2-bfb2-b3715452178f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.565435 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1b703165-9ec0-44d2-bfb2-b3715452178f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.666450 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b703165-9ec0-44d2-bfb2-b3715452178f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc 
kubenswrapper[4631]: I1129 04:13:08.666528 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1b703165-9ec0-44d2-bfb2-b3715452178f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.666623 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b703165-9ec0-44d2-bfb2-b3715452178f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.666659 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1b703165-9ec0-44d2-bfb2-b3715452178f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.666704 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1b703165-9ec0-44d2-bfb2-b3715452178f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.666790 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1b703165-9ec0-44d2-bfb2-b3715452178f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.667846 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1b703165-9ec0-44d2-bfb2-b3715452178f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.668177 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1b703165-9ec0-44d2-bfb2-b3715452178f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.678168 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b703165-9ec0-44d2-bfb2-b3715452178f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.698533 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b703165-9ec0-44d2-bfb2-b3715452178f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-f4kth\" (UID: \"1b703165-9ec0-44d2-bfb2-b3715452178f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.737025 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" Nov 29 04:13:08 crc kubenswrapper[4631]: W1129 04:13:08.761783 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b703165_9ec0_44d2_bfb2_b3715452178f.slice/crio-11808001af979b2f6043d47edd93587a2475881df3e0415b2fc358519e805e9a WatchSource:0}: Error finding container 11808001af979b2f6043d47edd93587a2475881df3e0415b2fc358519e805e9a: Status 404 returned error can't find the container with id 11808001af979b2f6043d47edd93587a2475881df3e0415b2fc358519e805e9a Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.899312 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" event={"ID":"1b703165-9ec0-44d2-bfb2-b3715452178f","Type":"ContainerStarted","Data":"7c04919c69038334187196856650124089a2ea07af0d73208db9600b5891748d"} Nov 29 04:13:08 crc kubenswrapper[4631]: I1129 04:13:08.899375 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" event={"ID":"1b703165-9ec0-44d2-bfb2-b3715452178f","Type":"ContainerStarted","Data":"11808001af979b2f6043d47edd93587a2475881df3e0415b2fc358519e805e9a"} Nov 29 04:13:09 crc kubenswrapper[4631]: I1129 04:13:09.215982 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:09 crc kubenswrapper[4631]: I1129 04:13:09.216116 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:09 crc kubenswrapper[4631]: E1129 04:13:09.216798 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:09 crc kubenswrapper[4631]: I1129 04:13:09.216200 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:09 crc kubenswrapper[4631]: E1129 04:13:09.217006 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:09 crc kubenswrapper[4631]: E1129 04:13:09.217317 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:10 crc kubenswrapper[4631]: I1129 04:13:10.216279 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:10 crc kubenswrapper[4631]: E1129 04:13:10.216490 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:11 crc kubenswrapper[4631]: I1129 04:13:11.216009 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:11 crc kubenswrapper[4631]: I1129 04:13:11.216067 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:11 crc kubenswrapper[4631]: I1129 04:13:11.216081 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:11 crc kubenswrapper[4631]: E1129 04:13:11.216798 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:11 crc kubenswrapper[4631]: E1129 04:13:11.216874 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:11 crc kubenswrapper[4631]: E1129 04:13:11.216585 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:12 crc kubenswrapper[4631]: I1129 04:13:12.215660 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:12 crc kubenswrapper[4631]: E1129 04:13:12.215795 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:13 crc kubenswrapper[4631]: E1129 04:13:13.168301 4631 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 29 04:13:13 crc kubenswrapper[4631]: I1129 04:13:13.215790 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:13 crc kubenswrapper[4631]: I1129 04:13:13.215870 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:13 crc kubenswrapper[4631]: I1129 04:13:13.215810 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:13 crc kubenswrapper[4631]: E1129 04:13:13.217061 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:13 crc kubenswrapper[4631]: E1129 04:13:13.217362 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:13 crc kubenswrapper[4631]: E1129 04:13:13.217403 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:13 crc kubenswrapper[4631]: E1129 04:13:13.296657 4631 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 29 04:13:14 crc kubenswrapper[4631]: I1129 04:13:14.216557 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:14 crc kubenswrapper[4631]: E1129 04:13:14.217307 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:14 crc kubenswrapper[4631]: I1129 04:13:14.217721 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:13:14 crc kubenswrapper[4631]: E1129 04:13:14.217995 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2npl6_openshift-ovn-kubernetes(cda25410-78a0-47a1-894f-621a855bd64a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" Nov 29 04:13:15 crc kubenswrapper[4631]: I1129 04:13:15.215660 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:15 crc kubenswrapper[4631]: I1129 04:13:15.215743 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:15 crc kubenswrapper[4631]: I1129 04:13:15.215743 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:15 crc kubenswrapper[4631]: E1129 04:13:15.215844 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:15 crc kubenswrapper[4631]: E1129 04:13:15.216030 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:15 crc kubenswrapper[4631]: E1129 04:13:15.216166 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:16 crc kubenswrapper[4631]: I1129 04:13:16.215793 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:16 crc kubenswrapper[4631]: E1129 04:13:16.215989 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:17 crc kubenswrapper[4631]: I1129 04:13:17.215817 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:17 crc kubenswrapper[4631]: I1129 04:13:17.215922 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:17 crc kubenswrapper[4631]: E1129 04:13:17.215985 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:17 crc kubenswrapper[4631]: I1129 04:13:17.215922 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:17 crc kubenswrapper[4631]: E1129 04:13:17.216150 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:17 crc kubenswrapper[4631]: E1129 04:13:17.216309 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:18 crc kubenswrapper[4631]: I1129 04:13:18.216005 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:18 crc kubenswrapper[4631]: E1129 04:13:18.216202 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:18 crc kubenswrapper[4631]: E1129 04:13:18.298776 4631 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" Nov 29 04:13:19 crc kubenswrapper[4631]: I1129 04:13:19.215702 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:19 crc kubenswrapper[4631]: I1129 04:13:19.215738 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:19 crc kubenswrapper[4631]: I1129 04:13:19.215804 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:19 crc kubenswrapper[4631]: E1129 04:13:19.215923 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:19 crc kubenswrapper[4631]: E1129 04:13:19.216032 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:19 crc kubenswrapper[4631]: E1129 04:13:19.216196 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:20 crc kubenswrapper[4631]: I1129 04:13:20.216491 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:20 crc kubenswrapper[4631]: E1129 04:13:20.216684 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:21 crc kubenswrapper[4631]: I1129 04:13:21.215613 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:21 crc kubenswrapper[4631]: E1129 04:13:21.215801 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:21 crc kubenswrapper[4631]: I1129 04:13:21.216117 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:21 crc kubenswrapper[4631]: E1129 04:13:21.216252 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:21 crc kubenswrapper[4631]: I1129 04:13:21.216634 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:21 crc kubenswrapper[4631]: E1129 04:13:21.216924 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:21 crc kubenswrapper[4631]: I1129 04:13:21.217172 4631 scope.go:117] "RemoveContainer" containerID="73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614" Nov 29 04:13:21 crc kubenswrapper[4631]: I1129 04:13:21.246538 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-f4kth" podStartSLOduration=110.246510736 podStartE2EDuration="1m50.246510736s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:09.945629842 +0000 UTC m=+117.010133386" watchObservedRunningTime="2025-11-29 04:13:21.246510736 +0000 UTC m=+128.311014290" Nov 29 04:13:21 crc kubenswrapper[4631]: I1129 04:13:21.950751 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/1.log" Nov 29 04:13:21 crc kubenswrapper[4631]: I1129 04:13:21.950832 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-pbk6b" event={"ID":"7f871e13-bbe2-4104-8f40-70e695653fef","Type":"ContainerStarted","Data":"8b098d771c091184a554145d83225c1b8122a63fed63b77f3eaf7d286223380b"} Nov 29 04:13:22 crc kubenswrapper[4631]: I1129 04:13:22.216524 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:22 crc kubenswrapper[4631]: E1129 04:13:22.216716 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:23 crc kubenswrapper[4631]: I1129 04:13:23.216010 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:23 crc kubenswrapper[4631]: I1129 04:13:23.216072 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:23 crc kubenswrapper[4631]: E1129 04:13:23.216183 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:23 crc kubenswrapper[4631]: I1129 04:13:23.216288 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:23 crc kubenswrapper[4631]: E1129 04:13:23.218376 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:23 crc kubenswrapper[4631]: E1129 04:13:23.218595 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:23 crc kubenswrapper[4631]: E1129 04:13:23.299677 4631 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 29 04:13:24 crc kubenswrapper[4631]: I1129 04:13:24.215619 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:24 crc kubenswrapper[4631]: E1129 04:13:24.216106 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:25 crc kubenswrapper[4631]: I1129 04:13:25.215510 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:25 crc kubenswrapper[4631]: E1129 04:13:25.215697 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:25 crc kubenswrapper[4631]: I1129 04:13:25.216008 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:25 crc kubenswrapper[4631]: I1129 04:13:25.216052 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:25 crc kubenswrapper[4631]: E1129 04:13:25.216144 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:25 crc kubenswrapper[4631]: E1129 04:13:25.216406 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:26 crc kubenswrapper[4631]: I1129 04:13:26.216463 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:26 crc kubenswrapper[4631]: E1129 04:13:26.217933 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:26 crc kubenswrapper[4631]: I1129 04:13:26.218552 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:13:26 crc kubenswrapper[4631]: I1129 04:13:26.968196 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/3.log" Nov 29 04:13:26 crc kubenswrapper[4631]: I1129 04:13:26.971199 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerStarted","Data":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} Nov 29 04:13:26 crc kubenswrapper[4631]: I1129 04:13:26.971635 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:13:27 crc kubenswrapper[4631]: I1129 04:13:27.000982 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podStartSLOduration=116.000967037 podStartE2EDuration="1m56.000967037s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:26.999709806 +0000 UTC m=+134.064213330" watchObservedRunningTime="2025-11-29 04:13:27.000967037 +0000 UTC m=+134.065470561" Nov 29 04:13:27 crc kubenswrapper[4631]: I1129 04:13:27.168948 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-b6vgh"] Nov 29 04:13:27 crc kubenswrapper[4631]: I1129 04:13:27.169355 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:27 crc kubenswrapper[4631]: E1129 04:13:27.169508 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:27 crc kubenswrapper[4631]: I1129 04:13:27.215799 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:27 crc kubenswrapper[4631]: E1129 04:13:27.215966 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:27 crc kubenswrapper[4631]: I1129 04:13:27.216044 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:27 crc kubenswrapper[4631]: I1129 04:13:27.216083 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:27 crc kubenswrapper[4631]: E1129 04:13:27.216175 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:27 crc kubenswrapper[4631]: E1129 04:13:27.216231 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:28 crc kubenswrapper[4631]: E1129 04:13:28.300970 4631 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 29 04:13:29 crc kubenswrapper[4631]: I1129 04:13:29.215698 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:29 crc kubenswrapper[4631]: E1129 04:13:29.215916 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:29 crc kubenswrapper[4631]: I1129 04:13:29.216023 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:29 crc kubenswrapper[4631]: E1129 04:13:29.216104 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:29 crc kubenswrapper[4631]: I1129 04:13:29.216550 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:29 crc kubenswrapper[4631]: E1129 04:13:29.216674 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:29 crc kubenswrapper[4631]: I1129 04:13:29.216970 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:29 crc kubenswrapper[4631]: E1129 04:13:29.217069 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:31 crc kubenswrapper[4631]: I1129 04:13:31.215685 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:31 crc kubenswrapper[4631]: I1129 04:13:31.215717 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:31 crc kubenswrapper[4631]: I1129 04:13:31.215774 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:31 crc kubenswrapper[4631]: E1129 04:13:31.216708 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:31 crc kubenswrapper[4631]: E1129 04:13:31.216649 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:31 crc kubenswrapper[4631]: E1129 04:13:31.216799 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:31 crc kubenswrapper[4631]: I1129 04:13:31.215815 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:31 crc kubenswrapper[4631]: E1129 04:13:31.216924 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:33 crc kubenswrapper[4631]: I1129 04:13:33.215829 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:33 crc kubenswrapper[4631]: I1129 04:13:33.215825 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:33 crc kubenswrapper[4631]: I1129 04:13:33.215917 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:33 crc kubenswrapper[4631]: I1129 04:13:33.215912 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:33 crc kubenswrapper[4631]: E1129 04:13:33.218170 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 29 04:13:33 crc kubenswrapper[4631]: E1129 04:13:33.218412 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 29 04:13:33 crc kubenswrapper[4631]: E1129 04:13:33.218569 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-b6vgh" podUID="c6c5bb91-f03c-4672-bc61-69a68b8c89d6" Nov 29 04:13:33 crc kubenswrapper[4631]: E1129 04:13:33.218661 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.215949 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.216027 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.216033 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.216057 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.221288 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.221413 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.222701 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.222786 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.223311 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 29 04:13:35 crc kubenswrapper[4631]: I1129 04:13:35.226411 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.650179 4631 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.709521 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-mbmnz"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.710221 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.715677 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.717137 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.718440 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-txv66"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.718954 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.720126 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.723628 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.724251 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.724983 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sgtgs"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.725618 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.727218 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.728135 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.728217 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-7lch5"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.728866 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-r9xmz"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.728982 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.729182 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.729495 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.729747 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-r9xmz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.730306 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.730872 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.733580 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.736155 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.736621 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.737277 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.737496 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.740377 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.740647 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.740818 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.740948 4631 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.741076 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.741468 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.741632 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.741714 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.741765 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.741946 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.742228 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.742402 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.742442 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.742450 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.745860 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.745875 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.745997 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.746076 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.746580 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.761753 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-swnf5"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.762352 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.762743 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.762928 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.763060 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.763310 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.763428 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.765644 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.765910 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.766100 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.766394 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.767139 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.767514 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.767695 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.768669 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.769635 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.746671 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.793561 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.794279 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.795275 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.795450 4631 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.795495 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.795551 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.795631 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.795640 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.795706 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.796078 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-mljqh"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.795958 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.799155 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-896b9"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.796138 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.799938 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.796616 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.796658 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.796738 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.796738 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.796806 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.797030 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.801147 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.803190 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.804264 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805553 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805143 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4l8kb"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.812856 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8h4ns"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.813161 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.813564 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.813934 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-tfz6d"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805649 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.814384 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805653 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 
04:13:38.805697 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805723 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805732 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805735 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805742 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805819 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.805854 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.811687 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.815234 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.815343 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.815435 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.815508 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.815579 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.815707 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.818274 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.818732 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.819200 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.819427 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.819872 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.820364 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.820575 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.820693 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.822268 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-crmsz"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.822887 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.825679 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.826352 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.826538 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.827274 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.827918 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-pbcfc"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.828235 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.831432 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.831901 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.832302 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.832463 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.832479 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833108 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2de073f0-8f78-4378-9df5-758e30e7b896-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-6s252\" (UID: \"2de073f0-8f78-4378-9df5-758e30e7b896\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833201 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5e9aad10-398a-479a-b828-100682ad67c7-images\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833270 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vfdm\" (UniqueName: \"kubernetes.io/projected/386dd0c3-88ce-4690-978a-0ecd6f029d5c-kube-api-access-4vfdm\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833366 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm7hd\" (UniqueName: \"kubernetes.io/projected/9ed87add-9fae-43f8-acf1-e8b425d9afee-kube-api-access-sm7hd\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833447 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7qjg\" (UniqueName: \"kubernetes.io/projected/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-kube-api-access-v7qjg\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833523 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: 
\"kubernetes.io/empty-dir/41cf001b-6d25-483e-ad01-03a028926fff-available-featuregates\") pod \"openshift-config-operator-7777fb866f-mljqh\" (UID: \"41cf001b-6d25-483e-ad01-03a028926fff\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833597 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r892q\" (UniqueName: \"kubernetes.io/projected/2de073f0-8f78-4378-9df5-758e30e7b896-kube-api-access-r892q\") pod \"openshift-apiserver-operator-796bbdcf4f-6s252\" (UID: \"2de073f0-8f78-4378-9df5-758e30e7b896\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833673 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-config\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833736 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a965f6-f0d3-4745-a0fb-919f82d6159b-service-ca-bundle\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833801 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-oauth-config\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833865 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/386dd0c3-88ce-4690-978a-0ecd6f029d5c-node-pullsecrets\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.833933 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4e7b7b24-4b23-4ae5-842f-73e826f944d2-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834000 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834068 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvxxw\" (UniqueName: 
\"kubernetes.io/projected/7f223354-db6f-4227-9e64-39c01f942b11-kube-api-access-mvxxw\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834143 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-service-ca\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834207 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e7b7b24-4b23-4ae5-842f-73e826f944d2-audit-dir\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834273 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wmw7\" (UniqueName: \"kubernetes.io/projected/4e7b7b24-4b23-4ae5-842f-73e826f944d2-kube-api-access-4wmw7\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834359 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-config\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834433 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f223354-db6f-4227-9e64-39c01f942b11-serving-cert\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834513 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0619cd40-96ab-4a00-b716-d7538b375a81-auth-proxy-config\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834582 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msmsg\" (UniqueName: \"kubernetes.io/projected/5e9aad10-398a-479a-b828-100682ad67c7-kube-api-access-msmsg\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834657 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-client-ca\") pod 
\"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834725 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834791 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4e7b7b24-4b23-4ae5-842f-73e826f944d2-etcd-client\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834875 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.834943 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2099834e-bb35-49e5-b94b-06cf1d172cb2-serving-cert\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835015 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md4nk\" (UniqueName: \"kubernetes.io/projected/86a965f6-f0d3-4745-a0fb-919f82d6159b-kube-api-access-md4nk\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835086 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835155 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86a965f6-f0d3-4745-a0fb-919f82d6159b-config\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835217 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-oauth-serving-cert\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835284 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41cf001b-6d25-483e-ad01-03a028926fff-serving-cert\") pod \"openshift-config-operator-7777fb866f-mljqh\" (UID: \"41cf001b-6d25-483e-ad01-03a028926fff\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835368 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835510 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835637 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a965f6-f0d3-4745-a0fb-919f82d6159b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835742 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4e7b7b24-4b23-4ae5-842f-73e826f944d2-encryption-config\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835827 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0619cd40-96ab-4a00-b716-d7538b375a81-machine-approver-tls\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835892 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hlnn\" (UniqueName: \"kubernetes.io/projected/002a7abb-c9ed-4ae8-92da-b4985ff0643c-kube-api-access-8hlnn\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.835976 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/0619cd40-96ab-4a00-b716-d7538b375a81-config\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836066 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwqwt\" (UniqueName: \"kubernetes.io/projected/0619cd40-96ab-4a00-b716-d7538b375a81-kube-api-access-fwqwt\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836152 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-client-ca\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836214 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4e7b7b24-4b23-4ae5-842f-73e826f944d2-audit-policies\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836280 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-dir\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836396 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836478 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-config\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836544 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-etcd-serving-ca\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836610 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e9aad10-398a-479a-b828-100682ad67c7-config\") pod 
\"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836669 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/5e9aad10-398a-479a-b828-100682ad67c7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836737 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/386dd0c3-88ce-4690-978a-0ecd6f029d5c-encryption-config\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836802 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dl6qp\" (UniqueName: \"kubernetes.io/projected/01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8-kube-api-access-dl6qp\") pod \"downloads-7954f5f757-r9xmz\" (UID: \"01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8\") " pod="openshift-console/downloads-7954f5f757-r9xmz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836865 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-trusted-ca-bundle\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836933 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-image-import-ca\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.836999 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-policies\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837068 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqskd\" (UniqueName: \"kubernetes.io/projected/41cf001b-6d25-483e-ad01-03a028926fff-kube-api-access-rqskd\") pod \"openshift-config-operator-7777fb866f-mljqh\" (UID: \"41cf001b-6d25-483e-ad01-03a028926fff\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837144 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2de073f0-8f78-4378-9df5-758e30e7b896-config\") pod \"openshift-apiserver-operator-796bbdcf4f-6s252\" (UID: \"2de073f0-8f78-4378-9df5-758e30e7b896\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837214 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837289 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/386dd0c3-88ce-4690-978a-0ecd6f029d5c-serving-cert\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837395 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-serving-cert\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837468 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e7b7b24-4b23-4ae5-842f-73e826f944d2-serving-cert\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837532 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837603 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e7b7b24-4b23-4ae5-842f-73e826f944d2-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837671 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837738 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p9pp\" (UniqueName: \"kubernetes.io/projected/2099834e-bb35-49e5-b94b-06cf1d172cb2-kube-api-access-8p9pp\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837805 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-config\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837870 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-audit\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.837936 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.838003 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.838091 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.838155 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.838222 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-trusted-ca-bundle\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.838284 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/386dd0c3-88ce-4690-978a-0ecd6f029d5c-etcd-client\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " 
pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.838363 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/386dd0c3-88ce-4690-978a-0ecd6f029d5c-audit-dir\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.838429 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.838501 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/86a965f6-f0d3-4745-a0fb-919f82d6159b-serving-cert\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.856655 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.857057 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.857673 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.858585 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.859494 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.859780 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.862718 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.863821 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.863981 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.864176 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.864445 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-txv66"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.864513 4631 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console"/"console-dockercfg-f62pw" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.864535 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.864750 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.864763 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.865090 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.866932 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.871668 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.871779 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.880674 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.882034 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.882116 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.886133 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.886200 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.886376 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.886387 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.888131 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.888414 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.889916 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.892650 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 
04:13:38.893294 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lp9sb"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.893732 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.894090 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.894374 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.894504 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.894929 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-xwkb8"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.895310 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.895609 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.896156 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.897592 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.902786 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.905010 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.907766 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.909726 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-r9xmz"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.910295 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.910999 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.911129 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kwszg"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.919382 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.919645 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-mbmnz"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.919737 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.919909 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.920453 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.920545 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.921010 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.921639 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.925248 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.930420 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.934220 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.939780 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-config\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.939819 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-audit\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.939841 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.939866 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.939894 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p9pp\" (UniqueName: \"kubernetes.io/projected/2099834e-bb35-49e5-b94b-06cf1d172cb2-kube-api-access-8p9pp\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.939920 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48e794c9-0aa9-42e3-94d2-27e97f141d7c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-x5s4x\" (UID: \"48e794c9-0aa9-42e3-94d2-27e97f141d7c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.939949 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 
04:13:38.939992 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.940013 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54fbm\" (UniqueName: \"kubernetes.io/projected/bc555ddf-f4df-4328-8396-c2ddaeb49ea0-kube-api-access-54fbm\") pod \"multus-admission-controller-857f4d67dd-lp9sb\" (UID: \"bc555ddf-f4df-4328-8396-c2ddaeb49ea0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.940034 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/386dd0c3-88ce-4690-978a-0ecd6f029d5c-etcd-client\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.940057 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/386dd0c3-88ce-4690-978a-0ecd6f029d5c-audit-dir\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.940122 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/386dd0c3-88ce-4690-978a-0ecd6f029d5c-audit-dir\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.940182 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.940288 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/86a965f6-f0d3-4745-a0fb-919f82d6159b-serving-cert\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.940369 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-trusted-ca-bundle\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.940397 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: 
\"kubernetes.io/secret/e4d8dab1-39ad-4c93-a452-5ecf8afda237-default-certificate\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.940454 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6tqk\" (UniqueName: \"kubernetes.io/projected/8361abea-c6bc-4927-a88b-c8318096d60d-kube-api-access-k6tqk\") pod \"control-plane-machine-set-operator-78cbb6b69f-x8t8v\" (UID: \"8361abea-c6bc-4927-a88b-c8318096d60d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.941396 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.942935 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2de073f0-8f78-4378-9df5-758e30e7b896-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-6s252\" (UID: \"2de073f0-8f78-4378-9df5-758e30e7b896\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.942971 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5e9aad10-398a-479a-b828-100682ad67c7-images\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.942993 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vfdm\" (UniqueName: \"kubernetes.io/projected/386dd0c3-88ce-4690-978a-0ecd6f029d5c-kube-api-access-4vfdm\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.943012 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm7hd\" (UniqueName: \"kubernetes.io/projected/9ed87add-9fae-43f8-acf1-e8b425d9afee-kube-api-access-sm7hd\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.943034 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7qjg\" (UniqueName: \"kubernetes.io/projected/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-kube-api-access-v7qjg\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.943054 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/41cf001b-6d25-483e-ad01-03a028926fff-available-featuregates\") 
pod \"openshift-config-operator-7777fb866f-mljqh\" (UID: \"41cf001b-6d25-483e-ad01-03a028926fff\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.943089 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r892q\" (UniqueName: \"kubernetes.io/projected/2de073f0-8f78-4378-9df5-758e30e7b896-kube-api-access-r892q\") pod \"openshift-apiserver-operator-796bbdcf4f-6s252\" (UID: \"2de073f0-8f78-4378-9df5-758e30e7b896\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.943112 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-config\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.943131 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a965f6-f0d3-4745-a0fb-919f82d6159b-service-ca-bundle\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.943149 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-oauth-config\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.943166 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/386dd0c3-88ce-4690-978a-0ecd6f029d5c-node-pullsecrets\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.943724 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.944479 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/41cf001b-6d25-483e-ad01-03a028926fff-available-featuregates\") pod \"openshift-config-operator-7777fb866f-mljqh\" (UID: \"41cf001b-6d25-483e-ad01-03a028926fff\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.944913 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4e7b7b24-4b23-4ae5-842f-73e826f944d2-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 
29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.945064 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-audit\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.945218 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/4e7b7b24-4b23-4ae5-842f-73e826f944d2-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.945778 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5e9aad10-398a-479a-b828-100682ad67c7-images\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.946587 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sgtgs"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.946897 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-trusted-ca-bundle\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.947904 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/386dd0c3-88ce-4690-978a-0ecd6f029d5c-etcd-client\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948270 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948386 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48e794c9-0aa9-42e3-94d2-27e97f141d7c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-x5s4x\" (UID: \"48e794c9-0aa9-42e3-94d2-27e97f141d7c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948562 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/386dd0c3-88ce-4690-978a-0ecd6f029d5c-node-pullsecrets\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948623 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48e794c9-0aa9-42e3-94d2-27e97f141d7c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-x5s4x\" (UID: \"48e794c9-0aa9-42e3-94d2-27e97f141d7c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948668 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-service-ca\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948702 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvxxw\" (UniqueName: \"kubernetes.io/projected/7f223354-db6f-4227-9e64-39c01f942b11-kube-api-access-mvxxw\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948780 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wmw7\" (UniqueName: \"kubernetes.io/projected/4e7b7b24-4b23-4ae5-842f-73e826f944d2-kube-api-access-4wmw7\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948809 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-config\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948832 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e7b7b24-4b23-4ae5-842f-73e826f944d2-audit-dir\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948852 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f223354-db6f-4227-9e64-39c01f942b11-serving-cert\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.948873 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msmsg\" (UniqueName: \"kubernetes.io/projected/5e9aad10-398a-479a-b828-100682ad67c7-kube-api-access-msmsg\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.949549 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a965f6-f0d3-4745-a0fb-919f82d6159b-service-ca-bundle\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: 
\"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.949764 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-config\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.949995 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.950112 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-mljqh"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.950078 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.950421 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-config\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.950871 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-service-ca\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.950928 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-config\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.950951 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bf469"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952134 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e7b7b24-4b23-4ae5-842f-73e826f944d2-audit-dir\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952278 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/0619cd40-96ab-4a00-b716-d7538b375a81-auth-proxy-config\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952345 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952372 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4e7b7b24-4b23-4ae5-842f-73e826f944d2-etcd-client\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952427 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952459 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-client-ca\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952470 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-oauth-config\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952514 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2099834e-bb35-49e5-b94b-06cf1d172cb2-serving-cert\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952541 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/257f5a70-3421-4c15-acec-c898cb9c4fe6-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4t9l5\" (UID: \"257f5a70-3421-4c15-acec-c898cb9c4fe6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952563 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257f5a70-3421-4c15-acec-c898cb9c4fe6-config\") pod \"kube-apiserver-operator-766d6c64bb-4t9l5\" (UID: 
\"257f5a70-3421-4c15-acec-c898cb9c4fe6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952579 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2de073f0-8f78-4378-9df5-758e30e7b896-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-6s252\" (UID: \"2de073f0-8f78-4378-9df5-758e30e7b896\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.952659 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.953157 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md4nk\" (UniqueName: \"kubernetes.io/projected/86a965f6-f0d3-4745-a0fb-919f82d6159b-kube-api-access-md4nk\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.953293 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.953350 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86a965f6-f0d3-4745-a0fb-919f82d6159b-config\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.953473 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-oauth-serving-cert\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.953522 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41cf001b-6d25-483e-ad01-03a028926fff-serving-cert\") pod \"openshift-config-operator-7777fb866f-mljqh\" (UID: \"41cf001b-6d25-483e-ad01-03a028926fff\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.953971 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86a965f6-f0d3-4745-a0fb-919f82d6159b-config\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.954597 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-client-ca\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.955196 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0619cd40-96ab-4a00-b716-d7538b375a81-auth-proxy-config\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.955739 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.961282 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.961648 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.961738 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a965f6-f0d3-4745-a0fb-919f82d6159b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.961805 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4e7b7b24-4b23-4ae5-842f-73e826f944d2-encryption-config\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.962306 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41cf001b-6d25-483e-ad01-03a028926fff-serving-cert\") pod \"openshift-config-operator-7777fb866f-mljqh\" (UID: \"41cf001b-6d25-483e-ad01-03a028926fff\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.962514 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.962621 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86a965f6-f0d3-4745-a0fb-919f82d6159b-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.962771 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/86a965f6-f0d3-4745-a0fb-919f82d6159b-serving-cert\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.962857 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.962882 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.962985 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4e7b7b24-4b23-4ae5-842f-73e826f944d2-etcd-client\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.963004 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0619cd40-96ab-4a00-b716-d7538b375a81-machine-approver-tls\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.963895 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hlnn\" (UniqueName: \"kubernetes.io/projected/002a7abb-c9ed-4ae8-92da-b4985ff0643c-kube-api-access-8hlnn\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.963979 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0619cd40-96ab-4a00-b716-d7538b375a81-config\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964049 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwqwt\" (UniqueName: \"kubernetes.io/projected/0619cd40-96ab-4a00-b716-d7538b375a81-kube-api-access-fwqwt\") pod \"machine-approver-56656f9798-9cjqm\" (UID: 
\"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964133 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4e7b7b24-4b23-4ae5-842f-73e826f944d2-audit-policies\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964207 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964228 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-dir\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964311 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/257f5a70-3421-4c15-acec-c898cb9c4fe6-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4t9l5\" (UID: \"257f5a70-3421-4c15-acec-c898cb9c4fe6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964378 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-client-ca\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964423 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e4d8dab1-39ad-4c93-a452-5ecf8afda237-service-ca-bundle\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964462 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964487 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-config\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964511 4631 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-etcd-serving-ca\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964536 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrtmz\" (UniqueName: \"kubernetes.io/projected/e4d8dab1-39ad-4c93-a452-5ecf8afda237-kube-api-access-qrtmz\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964564 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e9aad10-398a-479a-b828-100682ad67c7-config\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964590 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/5e9aad10-398a-479a-b828-100682ad67c7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964616 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/386dd0c3-88ce-4690-978a-0ecd6f029d5c-encryption-config\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964646 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-trusted-ca-bundle\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964672 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e4d8dab1-39ad-4c93-a452-5ecf8afda237-stats-auth\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964700 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4d8dab1-39ad-4c93-a452-5ecf8afda237-metrics-certs\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964730 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dl6qp\" (UniqueName: \"kubernetes.io/projected/01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8-kube-api-access-dl6qp\") pod \"downloads-7954f5f757-r9xmz\" (UID: 
\"01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8\") " pod="openshift-console/downloads-7954f5f757-r9xmz" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964754 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-policies\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964778 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqskd\" (UniqueName: \"kubernetes.io/projected/41cf001b-6d25-483e-ad01-03a028926fff-kube-api-access-rqskd\") pod \"openshift-config-operator-7777fb866f-mljqh\" (UID: \"41cf001b-6d25-483e-ad01-03a028926fff\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964802 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bc555ddf-f4df-4328-8396-c2ddaeb49ea0-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lp9sb\" (UID: \"bc555ddf-f4df-4328-8396-c2ddaeb49ea0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964829 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2de073f0-8f78-4378-9df5-758e30e7b896-config\") pod \"openshift-apiserver-operator-796bbdcf4f-6s252\" (UID: \"2de073f0-8f78-4378-9df5-758e30e7b896\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964852 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-image-import-ca\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964874 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/386dd0c3-88ce-4690-978a-0ecd6f029d5c-serving-cert\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964896 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964917 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e7b7b24-4b23-4ae5-842f-73e826f944d2-serving-cert\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964941 4631 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964966 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8361abea-c6bc-4927-a88b-c8318096d60d-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-x8t8v\" (UID: \"8361abea-c6bc-4927-a88b-c8318096d60d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.964998 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-serving-cert\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.965023 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e7b7b24-4b23-4ae5-842f-73e826f944d2-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.965052 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.965366 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-dir\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.963386 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-oauth-serving-cert\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.965478 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.965539 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.966178 
4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0619cd40-96ab-4a00-b716-d7538b375a81-config\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.966476 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-client-ca\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.966853 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2de073f0-8f78-4378-9df5-758e30e7b896-config\") pod \"openshift-apiserver-operator-796bbdcf4f-6s252\" (UID: \"2de073f0-8f78-4378-9df5-758e30e7b896\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.966882 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-image-import-ca\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.967011 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4e7b7b24-4b23-4ae5-842f-73e826f944d2-audit-policies\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.967467 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.968210 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.968711 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f223354-db6f-4227-9e64-39c01f942b11-serving-cert\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.968738 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e7b7b24-4b23-4ae5-842f-73e826f944d2-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: 
\"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.968790 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-crmsz"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.969306 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.969318 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2099834e-bb35-49e5-b94b-06cf1d172cb2-serving-cert\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.969373 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.969478 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.969798 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-config\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.969933 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-etcd-serving-ca\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.970118 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.970390 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/4e7b7b24-4b23-4ae5-842f-73e826f944d2-encryption-config\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.970514 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e9aad10-398a-479a-b828-100682ad67c7-config\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.970724 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-policies\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.970942 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/386dd0c3-88ce-4690-978a-0ecd6f029d5c-trusted-ca-bundle\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.971286 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0619cd40-96ab-4a00-b716-d7538b375a81-machine-approver-tls\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.972004 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.972264 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/5e9aad10-398a-479a-b828-100682ad67c7-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.972308 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.972437 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/386dd0c3-88ce-4690-978a-0ecd6f029d5c-serving-cert\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.972780 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/386dd0c3-88ce-4690-978a-0ecd6f029d5c-encryption-config\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 
04:13:38.973165 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.974766 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4e7b7b24-4b23-4ae5-842f-73e826f944d2-serving-cert\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.974871 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lp9sb"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.975621 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-serving-cert\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.977284 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-896b9"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.977566 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.978850 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.979892 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.981266 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4l8kb"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.982366 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-7lch5"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.983951 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-swnf5"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.984930 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.985873 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.986852 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-xwkb8"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.988050 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-59sqc"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.988655 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-59sqc" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.989201 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8h4ns"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.990277 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-tfz6d"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.991228 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zsmnf"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.992568 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-lsnw8"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.993071 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.993221 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.993316 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.993583 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bf469"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.995198 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.996220 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kwszg"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.997228 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.998222 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn"] Nov 29 04:13:38 crc kubenswrapper[4631]: I1129 04:13:38.999928 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-59sqc"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.001303 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.002396 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.003953 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.004852 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.005862 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zsmnf"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.006870 4631 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.008157 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.009890 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.011088 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-lsnw8"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.012265 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-rv7nt"] Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.012548 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.013200 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.032404 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.052423 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.065767 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e4d8dab1-39ad-4c93-a452-5ecf8afda237-default-certificate\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.065798 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6tqk\" (UniqueName: \"kubernetes.io/projected/8361abea-c6bc-4927-a88b-c8318096d60d-kube-api-access-k6tqk\") pod \"control-plane-machine-set-operator-78cbb6b69f-x8t8v\" (UID: \"8361abea-c6bc-4927-a88b-c8318096d60d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.065849 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48e794c9-0aa9-42e3-94d2-27e97f141d7c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-x5s4x\" (UID: \"48e794c9-0aa9-42e3-94d2-27e97f141d7c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066088 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48e794c9-0aa9-42e3-94d2-27e97f141d7c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-x5s4x\" (UID: \"48e794c9-0aa9-42e3-94d2-27e97f141d7c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066150 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/257f5a70-3421-4c15-acec-c898cb9c4fe6-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4t9l5\" (UID: \"257f5a70-3421-4c15-acec-c898cb9c4fe6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066171 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257f5a70-3421-4c15-acec-c898cb9c4fe6-config\") pod \"kube-apiserver-operator-766d6c64bb-4t9l5\" (UID: \"257f5a70-3421-4c15-acec-c898cb9c4fe6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066378 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/257f5a70-3421-4c15-acec-c898cb9c4fe6-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4t9l5\" (UID: \"257f5a70-3421-4c15-acec-c898cb9c4fe6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066408 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e4d8dab1-39ad-4c93-a452-5ecf8afda237-service-ca-bundle\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066529 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrtmz\" (UniqueName: \"kubernetes.io/projected/e4d8dab1-39ad-4c93-a452-5ecf8afda237-kube-api-access-qrtmz\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066561 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e4d8dab1-39ad-4c93-a452-5ecf8afda237-stats-auth\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066646 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4d8dab1-39ad-4c93-a452-5ecf8afda237-metrics-certs\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066671 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bc555ddf-f4df-4328-8396-c2ddaeb49ea0-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lp9sb\" (UID: \"bc555ddf-f4df-4328-8396-c2ddaeb49ea0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066689 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8361abea-c6bc-4927-a88b-c8318096d60d-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-x8t8v\" (UID: 
\"8361abea-c6bc-4927-a88b-c8318096d60d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066726 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48e794c9-0aa9-42e3-94d2-27e97f141d7c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-x5s4x\" (UID: \"48e794c9-0aa9-42e3-94d2-27e97f141d7c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066745 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54fbm\" (UniqueName: \"kubernetes.io/projected/bc555ddf-f4df-4328-8396-c2ddaeb49ea0-kube-api-access-54fbm\") pod \"multus-admission-controller-857f4d67dd-lp9sb\" (UID: \"bc555ddf-f4df-4328-8396-c2ddaeb49ea0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.066745 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48e794c9-0aa9-42e3-94d2-27e97f141d7c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-x5s4x\" (UID: \"48e794c9-0aa9-42e3-94d2-27e97f141d7c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.068870 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48e794c9-0aa9-42e3-94d2-27e97f141d7c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-x5s4x\" (UID: \"48e794c9-0aa9-42e3-94d2-27e97f141d7c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.073173 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.092167 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.112639 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.132413 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.152966 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.172623 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.177142 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/257f5a70-3421-4c15-acec-c898cb9c4fe6-config\") pod \"kube-apiserver-operator-766d6c64bb-4t9l5\" (UID: \"257f5a70-3421-4c15-acec-c898cb9c4fe6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.192160 
4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.199927 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/257f5a70-3421-4c15-acec-c898cb9c4fe6-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4t9l5\" (UID: \"257f5a70-3421-4c15-acec-c898cb9c4fe6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.211805 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.232415 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.252664 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.272671 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.292941 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.312837 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.332866 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.353255 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.373735 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.395475 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.403811 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8361abea-c6bc-4927-a88b-c8318096d60d-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-x8t8v\" (UID: \"8361abea-c6bc-4927-a88b-c8318096d60d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.412954 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.431968 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 29 04:13:39 crc 
kubenswrapper[4631]: I1129 04:13:39.452647 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.473522 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.493367 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.513803 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.532956 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.553080 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.573533 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.592668 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.613550 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.633227 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.652971 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.672860 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.693525 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.712712 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.733541 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.741301 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e4d8dab1-39ad-4c93-a452-5ecf8afda237-default-certificate\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.753156 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.762884 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e4d8dab1-39ad-4c93-a452-5ecf8afda237-stats-auth\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.773553 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.781940 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4d8dab1-39ad-4c93-a452-5ecf8afda237-metrics-certs\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.793023 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.813185 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.818281 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e4d8dab1-39ad-4c93-a452-5ecf8afda237-service-ca-bundle\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.831287 4631 request.go:700] Waited for 1.002742886s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.832927 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.875063 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.892990 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.913254 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.933904 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.953081 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.972580 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 29 04:13:39 crc kubenswrapper[4631]: I1129 04:13:39.992854 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.013451 4631 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.033317 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.042088 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bc555ddf-f4df-4328-8396-c2ddaeb49ea0-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-lp9sb\" (UID: \"bc555ddf-f4df-4328-8396-c2ddaeb49ea0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.053104 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.073008 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.092951 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.113514 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.133098 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.153434 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.173375 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.193246 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.232997 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.253107 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.273502 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.302979 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.313413 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.332968 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.353946 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 
04:13:40.373155 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.392673 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.439685 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p9pp\" (UniqueName: \"kubernetes.io/projected/2099834e-bb35-49e5-b94b-06cf1d172cb2-kube-api-access-8p9pp\") pod \"controller-manager-879f6c89f-sgtgs\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.455392 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vfdm\" (UniqueName: \"kubernetes.io/projected/386dd0c3-88ce-4690-978a-0ecd6f029d5c-kube-api-access-4vfdm\") pod \"apiserver-76f77b778f-7lch5\" (UID: \"386dd0c3-88ce-4690-978a-0ecd6f029d5c\") " pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.483242 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7qjg\" (UniqueName: \"kubernetes.io/projected/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-kube-api-access-v7qjg\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.488534 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r892q\" (UniqueName: \"kubernetes.io/projected/2de073f0-8f78-4378-9df5-758e30e7b896-kube-api-access-r892q\") pod \"openshift-apiserver-operator-796bbdcf4f-6s252\" (UID: \"2de073f0-8f78-4378-9df5-758e30e7b896\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.507453 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm7hd\" (UniqueName: \"kubernetes.io/projected/9ed87add-9fae-43f8-acf1-e8b425d9afee-kube-api-access-sm7hd\") pod \"oauth-openshift-558db77b4-swnf5\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.515639 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.540709 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvxxw\" (UniqueName: \"kubernetes.io/projected/7f223354-db6f-4227-9e64-39c01f942b11-kube-api-access-mvxxw\") pod \"route-controller-manager-6576b87f9c-ct7j8\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.562909 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wmw7\" (UniqueName: \"kubernetes.io/projected/4e7b7b24-4b23-4ae5-842f-73e826f944d2-kube-api-access-4wmw7\") pod \"apiserver-7bbb656c7d-2bh74\" (UID: \"4e7b7b24-4b23-4ae5-842f-73e826f944d2\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.565517 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.572804 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msmsg\" (UniqueName: \"kubernetes.io/projected/5e9aad10-398a-479a-b828-100682ad67c7-kube-api-access-msmsg\") pod \"machine-api-operator-5694c8668f-txv66\" (UID: \"5e9aad10-398a-479a-b828-100682ad67c7\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.589602 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md4nk\" (UniqueName: \"kubernetes.io/projected/86a965f6-f0d3-4745-a0fb-919f82d6159b-kube-api-access-md4nk\") pod \"authentication-operator-69f744f599-mbmnz\" (UID: \"86a965f6-f0d3-4745-a0fb-919f82d6159b\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.601762 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.605390 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b1d2be36-2bd7-4b11-a4bc-111dde998ffb-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-ddddt\" (UID: \"b1d2be36-2bd7-4b11-a4bc-111dde998ffb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.612632 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.632767 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.635824 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.652928 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.674175 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.685542 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.695271 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.726296 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.743483 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hlnn\" (UniqueName: \"kubernetes.io/projected/002a7abb-c9ed-4ae8-92da-b4985ff0643c-kube-api-access-8hlnn\") pod \"console-f9d7485db-896b9\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.750551 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwqwt\" (UniqueName: \"kubernetes.io/projected/0619cd40-96ab-4a00-b716-d7538b375a81-kube-api-access-fwqwt\") pod \"machine-approver-56656f9798-9cjqm\" (UID: \"0619cd40-96ab-4a00-b716-d7538b375a81\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.752515 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.768042 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dl6qp\" (UniqueName: \"kubernetes.io/projected/01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8-kube-api-access-dl6qp\") pod \"downloads-7954f5f757-r9xmz\" (UID: \"01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8\") " pod="openshift-console/downloads-7954f5f757-r9xmz" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.789796 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqskd\" (UniqueName: \"kubernetes.io/projected/41cf001b-6d25-483e-ad01-03a028926fff-kube-api-access-rqskd\") pod \"openshift-config-operator-7777fb866f-mljqh\" (UID: \"41cf001b-6d25-483e-ad01-03a028926fff\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.792436 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.807311 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-r9xmz" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.814077 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.822838 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.830180 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.834302 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.838098 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.838190 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.850930 4631 request.go:700] Waited for 1.861651154s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.854648 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.876840 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.877182 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74"] Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.888283 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-txv66"] Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.896173 4631 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.912743 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.936692 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.964523 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.965832 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sgtgs"] Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.972451 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 29 04:13:40 crc kubenswrapper[4631]: I1129 04:13:40.998049 4631 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.006459 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.014221 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.034683 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.036473 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8"] Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.037732 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" event={"ID":"5e9aad10-398a-479a-b828-100682ad67c7","Type":"ContainerStarted","Data":"4f5df93c99e09f347a2d46db31241c6da5a8b179917d9d2526bb305b72251908"} Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.040190 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" event={"ID":"4e7b7b24-4b23-4ae5-842f-73e826f944d2","Type":"ContainerStarted","Data":"da00ecd935f7b7c3af638930e6294c512869cc1a6b59d7fa4eb50644cd1c5f94"} Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.064728 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-swnf5"] Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.082951 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6tqk\" (UniqueName: \"kubernetes.io/projected/8361abea-c6bc-4927-a88b-c8318096d60d-kube-api-access-k6tqk\") pod \"control-plane-machine-set-operator-78cbb6b69f-x8t8v\" (UID: \"8361abea-c6bc-4927-a88b-c8318096d60d\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.100781 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.100922 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.100962 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.101840 4631 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/257f5a70-3421-4c15-acec-c898cb9c4fe6-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4t9l5\" (UID: \"257f5a70-3421-4c15-acec-c898cb9c4fe6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.101983 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:15:43.101965296 +0000 UTC m=+270.166468810 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.103222 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.114691 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.133508 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrtmz\" (UniqueName: \"kubernetes.io/projected/e4d8dab1-39ad-4c93-a452-5ecf8afda237-kube-api-access-qrtmz\") pod \"router-default-5444994796-pbcfc\" (UID: \"e4d8dab1-39ad-4c93-a452-5ecf8afda237\") " pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:41 crc kubenswrapper[4631]: W1129 04:13:41.135254 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ed87add_9fae_43f8_acf1_e8b425d9afee.slice/crio-24127abc84f50b0e8edb2c94aa58ec4d0508680cabbf4cb732d0a853ddde988b WatchSource:0}: Error finding container 24127abc84f50b0e8edb2c94aa58ec4d0508680cabbf4cb732d0a853ddde988b: Status 404 returned error can't find the container with id 24127abc84f50b0e8edb2c94aa58ec4d0508680cabbf4cb732d0a853ddde988b Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.144023 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252"] Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.148208 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54fbm\" (UniqueName: \"kubernetes.io/projected/bc555ddf-f4df-4328-8396-c2ddaeb49ea0-kube-api-access-54fbm\") pod \"multus-admission-controller-857f4d67dd-lp9sb\" (UID: 
\"bc555ddf-f4df-4328-8396-c2ddaeb49ea0\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.150497 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48e794c9-0aa9-42e3-94d2-27e97f141d7c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-x5s4x\" (UID: \"48e794c9-0aa9-42e3-94d2-27e97f141d7c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.181573 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-896b9"] Nov 29 04:13:41 crc kubenswrapper[4631]: W1129 04:13:41.188932 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2de073f0_8f78_4378_9df5_758e30e7b896.slice/crio-153da4b3b850a49f782f781cf4779510cf5def048ca932f7f1bc394a0257d3fe WatchSource:0}: Error finding container 153da4b3b850a49f782f781cf4779510cf5def048ca932f7f1bc394a0257d3fe: Status 404 returned error can't find the container with id 153da4b3b850a49f782f781cf4779510cf5def048ca932f7f1bc394a0257d3fe Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.189231 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-7lch5"] Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.194254 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205578 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6kbj\" (UniqueName: \"kubernetes.io/projected/108bb116-2d8b-4e5c-90fa-fa22239d68ac-kube-api-access-k6kbj\") pod \"service-ca-operator-777779d784-bnmtn\" (UID: \"108bb116-2d8b-4e5c-90fa-fa22239d68ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205614 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205640 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72qjg\" (UniqueName: \"kubernetes.io/projected/ca1d8fec-8229-4904-9fc3-178914884ea0-kube-api-access-72qjg\") pod \"kube-storage-version-migrator-operator-b67b599dd-m24c2\" (UID: \"ca1d8fec-8229-4904-9fc3-178914884ea0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205661 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/108bb116-2d8b-4e5c-90fa-fa22239d68ac-config\") pod \"service-ca-operator-777779d784-bnmtn\" (UID: \"108bb116-2d8b-4e5c-90fa-fa22239d68ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 
04:13:41.205678 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/96b28de4-d863-4d8a-86fc-f55986dde2cc-etcd-client\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205719 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8649ca07-00c1-4783-ba8c-3ab66f168149-metrics-tls\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205733 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/96b28de4-d863-4d8a-86fc-f55986dde2cc-etcd-ca\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205749 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8649ca07-00c1-4783-ba8c-3ab66f168149-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205767 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a-signing-cabundle\") pod \"service-ca-9c57cc56f-xwkb8\" (UID: \"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a\") " pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205791 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d8fec-8229-4904-9fc3-178914884ea0-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-m24c2\" (UID: \"ca1d8fec-8229-4904-9fc3-178914884ea0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205807 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-bound-sa-token\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205852 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2794d648-d3ac-4540-85be-c80a1997ff1c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vqqsc\" (UID: \"2794d648-d3ac-4540-85be-c80a1997ff1c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205881 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a-signing-key\") pod \"service-ca-9c57cc56f-xwkb8\" (UID: \"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a\") " pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205903 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2efa169-6700-4901-a36d-cdba29de5269-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-hckm8\" (UID: \"c2efa169-6700-4901-a36d-cdba29de5269\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205923 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2efa169-6700-4901-a36d-cdba29de5269-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-hckm8\" (UID: \"c2efa169-6700-4901-a36d-cdba29de5269\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.205943 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3be4bf0e-d446-4474-900c-0a5aec6450a5-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qrnbr\" (UID: \"3be4bf0e-d446-4474-900c-0a5aec6450a5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.206176 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/679c6cbe-89a8-458a-a798-5f70ef026702-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-pspvc\" (UID: \"679c6cbe-89a8-458a-a798-5f70ef026702\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.206370 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3be4bf0e-d446-4474-900c-0a5aec6450a5-config\") pod \"kube-controller-manager-operator-78b949d7b-qrnbr\" (UID: \"3be4bf0e-d446-4474-900c-0a5aec6450a5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.206413 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b92fj\" (UniqueName: \"kubernetes.io/projected/f1ca18ee-d4fc-4e20-9719-7647edaa6296-kube-api-access-b92fj\") pod \"dns-operator-744455d44c-crmsz\" (UID: \"f1ca18ee-d4fc-4e20-9719-7647edaa6296\") " pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.206783 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/171d32d8-1dcb-497d-9724-d798414c5602-ca-trust-extracted\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.206818 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-registry-certificates\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.206849 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3be4bf0e-d446-4474-900c-0a5aec6450a5-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qrnbr\" (UID: \"3be4bf0e-d446-4474-900c-0a5aec6450a5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.206990 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/96b28de4-d863-4d8a-86fc-f55986dde2cc-serving-cert\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207007 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98724a81-608d-48f1-b876-e4354b2ff65d-config\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207050 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/9255468d-b26e-412e-84fd-ad5a94279720-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-x67rw\" (UID: \"9255468d-b26e-412e-84fd-ad5a94279720\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207073 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d8fec-8229-4904-9fc3-178914884ea0-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-m24c2\" (UID: \"ca1d8fec-8229-4904-9fc3-178914884ea0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207088 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8649ca07-00c1-4783-ba8c-3ab66f168149-trusted-ca\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207103 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7fg2\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-kube-api-access-m7fg2\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: 
\"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207171 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99vn7\" (UniqueName: \"kubernetes.io/projected/2794d648-d3ac-4540-85be-c80a1997ff1c-kube-api-access-99vn7\") pod \"olm-operator-6b444d44fb-vqqsc\" (UID: \"2794d648-d3ac-4540-85be-c80a1997ff1c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207197 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/108bb116-2d8b-4e5c-90fa-fa22239d68ac-serving-cert\") pod \"service-ca-operator-777779d784-bnmtn\" (UID: \"108bb116-2d8b-4e5c-90fa-fa22239d68ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207215 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d555l\" (UniqueName: \"kubernetes.io/projected/97f2dec2-800d-4308-9df6-61f5f94f2393-kube-api-access-d555l\") pod \"migrator-59844c95c7-xsjfr\" (UID: \"97f2dec2-800d-4308-9df6-61f5f94f2393\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207232 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f1ca18ee-d4fc-4e20-9719-7647edaa6296-metrics-tls\") pod \"dns-operator-744455d44c-crmsz\" (UID: \"f1ca18ee-d4fc-4e20-9719-7647edaa6296\") " pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207251 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lt9h8\" (UniqueName: \"kubernetes.io/projected/8649ca07-00c1-4783-ba8c-3ab66f168149-kube-api-access-lt9h8\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207267 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/98724a81-608d-48f1-b876-e4354b2ff65d-trusted-ca\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207297 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2794d648-d3ac-4540-85be-c80a1997ff1c-srv-cert\") pod \"olm-operator-6b444d44fb-vqqsc\" (UID: \"2794d648-d3ac-4540-85be-c80a1997ff1c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207324 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/171d32d8-1dcb-497d-9724-d798414c5602-installation-pull-secrets\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207398 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-trusted-ca\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207415 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-registry-tls\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207432 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnlkq\" (UniqueName: \"kubernetes.io/projected/7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a-kube-api-access-vnlkq\") pod \"service-ca-9c57cc56f-xwkb8\" (UID: \"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a\") " pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207463 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvbr9\" (UniqueName: \"kubernetes.io/projected/9255468d-b26e-412e-84fd-ad5a94279720-kube-api-access-wvbr9\") pod \"cluster-samples-operator-665b6dd947-x67rw\" (UID: \"9255468d-b26e-412e-84fd-ad5a94279720\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207478 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/98724a81-608d-48f1-b876-e4354b2ff65d-serving-cert\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207501 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207570 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnqcw\" (UniqueName: \"kubernetes.io/projected/679c6cbe-89a8-458a-a798-5f70ef026702-kube-api-access-cnqcw\") pod \"package-server-manager-789f6589d5-pspvc\" (UID: \"679c6cbe-89a8-458a-a798-5f70ef026702\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207598 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qkbf\" (UniqueName: \"kubernetes.io/projected/96b28de4-d863-4d8a-86fc-f55986dde2cc-kube-api-access-2qkbf\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207613 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wfn9\" (UniqueName: \"kubernetes.io/projected/98724a81-608d-48f1-b876-e4354b2ff65d-kube-api-access-5wfn9\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207631 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.207651 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szpw2\" (UniqueName: \"kubernetes.io/projected/c2efa169-6700-4901-a36d-cdba29de5269-kube-api-access-szpw2\") pod \"openshift-controller-manager-operator-756b6f6bc6-hckm8\" (UID: \"c2efa169-6700-4901-a36d-cdba29de5269\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.208382 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/96b28de4-d863-4d8a-86fc-f55986dde2cc-etcd-service-ca\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.208417 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96b28de4-d863-4d8a-86fc-f55986dde2cc-config\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.208919 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:41.708905503 +0000 UTC m=+148.773409007 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.210636 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" Nov 29 04:13:41 crc kubenswrapper[4631]: W1129 04:13:41.212934 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod386dd0c3_88ce_4690_978a_0ecd6f029d5c.slice/crio-1a82d52e3d1840ca40a7ccdb201497eb22327bdd81ff4a629b678be65ce13d3d WatchSource:0}: Error finding container 1a82d52e3d1840ca40a7ccdb201497eb22327bdd81ff4a629b678be65ce13d3d: Status 404 returned error can't find the container with id 1a82d52e3d1840ca40a7ccdb201497eb22327bdd81ff4a629b678be65ce13d3d Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.215421 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.216771 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.238965 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.243265 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-r9xmz"] Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.246744 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.260131 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.267771 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" Nov 29 04:13:41 crc kubenswrapper[4631]: W1129 04:13:41.269340 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01cc7cce_92f8_44d1_9dd3_2d0b0742b3a8.slice/crio-bd312b3479fc14e4e39f17d5803a08d7d26d3ddb0b645822425df023dad54612 WatchSource:0}: Error finding container bd312b3479fc14e4e39f17d5803a08d7d26d3ddb0b645822425df023dad54612: Status 404 returned error can't find the container with id bd312b3479fc14e4e39f17d5803a08d7d26d3ddb0b645822425df023dad54612 Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.272352 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.309826 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.309908 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:41.809878263 +0000 UTC m=+148.874381767 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.310446 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8649ca07-00c1-4783-ba8c-3ab66f168149-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.310532 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a-signing-cabundle\") pod \"service-ca-9c57cc56f-xwkb8\" (UID: \"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a\") " pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.310604 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/59dfd767-fdf5-4034-85db-6ae66566ee59-tmpfs\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.310672 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d8fec-8229-4904-9fc3-178914884ea0-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-m24c2\" (UID: \"ca1d8fec-8229-4904-9fc3-178914884ea0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.310807 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp4ss\" (UniqueName: \"kubernetes.io/projected/c87c11b9-cb13-4548-b9f3-f40b9962b739-kube-api-access-wp4ss\") pod \"ingress-canary-59sqc\" (UID: \"c87c11b9-cb13-4548-b9f3-f40b9962b739\") " pod="openshift-ingress-canary/ingress-canary-59sqc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.310872 4631 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kwszg\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.310950 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-bound-sa-token\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.311015 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2794d648-d3ac-4540-85be-c80a1997ff1c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vqqsc\" (UID: \"2794d648-d3ac-4540-85be-c80a1997ff1c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.311086 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a-signing-key\") pod \"service-ca-9c57cc56f-xwkb8\" (UID: \"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a\") " pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.311155 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2efa169-6700-4901-a36d-cdba29de5269-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-hckm8\" (UID: \"c2efa169-6700-4901-a36d-cdba29de5269\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.311222 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2efa169-6700-4901-a36d-cdba29de5269-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-hckm8\" (UID: \"c2efa169-6700-4901-a36d-cdba29de5269\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.311285 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3be4bf0e-d446-4474-900c-0a5aec6450a5-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qrnbr\" (UID: \"3be4bf0e-d446-4474-900c-0a5aec6450a5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.311362 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab-node-bootstrap-token\") pod \"machine-config-server-rv7nt\" (UID: \"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab\") " pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.311451 4631 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/679c6cbe-89a8-458a-a798-5f70ef026702-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-pspvc\" (UID: \"679c6cbe-89a8-458a-a798-5f70ef026702\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.311943 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5v2p\" (UniqueName: \"kubernetes.io/projected/78552157-0b5f-437a-988a-71805a812ab2-kube-api-access-p5v2p\") pod \"collect-profiles-29406480-7l9nf\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.312170 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2tvl\" (UniqueName: \"kubernetes.io/projected/139e3088-813e-4ac6-8145-41aaf955cce6-kube-api-access-d2tvl\") pod \"dns-default-lsnw8\" (UID: \"139e3088-813e-4ac6-8145-41aaf955cce6\") " pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.312347 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl7jm\" (UniqueName: \"kubernetes.io/projected/73c987cc-2f81-4c56-a41d-1f8c9d601fa9-kube-api-access-tl7jm\") pod \"machine-config-controller-84d6567774-bf469\" (UID: \"73c987cc-2f81-4c56-a41d-1f8c9d601fa9\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.312420 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jg85\" (UniqueName: \"kubernetes.io/projected/59dfd767-fdf5-4034-85db-6ae66566ee59-kube-api-access-2jg85\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.312556 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8e76293e-e7ed-4b04-8941-d41fe405c987-profile-collector-cert\") pod \"catalog-operator-68c6474976-l66gm\" (UID: \"8e76293e-e7ed-4b04-8941-d41fe405c987\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.312706 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b92fj\" (UniqueName: \"kubernetes.io/projected/f1ca18ee-d4fc-4e20-9719-7647edaa6296-kube-api-access-b92fj\") pod \"dns-operator-744455d44c-crmsz\" (UID: \"f1ca18ee-d4fc-4e20-9719-7647edaa6296\") " pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.313153 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3be4bf0e-d446-4474-900c-0a5aec6450a5-config\") pod \"kube-controller-manager-operator-78b949d7b-qrnbr\" (UID: \"3be4bf0e-d446-4474-900c-0a5aec6450a5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.313675 4631 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/171d32d8-1dcb-497d-9724-d798414c5602-ca-trust-extracted\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.314126 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-registry-certificates\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.314731 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3be4bf0e-d446-4474-900c-0a5aec6450a5-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qrnbr\" (UID: \"3be4bf0e-d446-4474-900c-0a5aec6450a5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.314909 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-registration-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.315112 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/73c987cc-2f81-4c56-a41d-1f8c9d601fa9-proxy-tls\") pod \"machine-config-controller-84d6567774-bf469\" (UID: \"73c987cc-2f81-4c56-a41d-1f8c9d601fa9\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.315275 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl95w\" (UniqueName: \"kubernetes.io/projected/dba614c7-ceae-4ce5-afb6-6d082156f640-kube-api-access-pl95w\") pod \"marketplace-operator-79b997595-kwszg\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.315639 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3be4bf0e-d446-4474-900c-0a5aec6450a5-config\") pod \"kube-controller-manager-operator-78b949d7b-qrnbr\" (UID: \"3be4bf0e-d446-4474-900c-0a5aec6450a5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.312953 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a-signing-cabundle\") pod \"service-ca-9c57cc56f-xwkb8\" (UID: \"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a\") " pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.316179 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: 
\"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-mountpoint-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.316793 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/96b28de4-d863-4d8a-86fc-f55986dde2cc-serving-cert\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.316825 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98724a81-608d-48f1-b876-e4354b2ff65d-config\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.316954 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/9255468d-b26e-412e-84fd-ad5a94279720-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-x67rw\" (UID: \"9255468d-b26e-412e-84fd-ad5a94279720\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.316979 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kwszg\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.316997 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-csi-data-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317015 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbxb5\" (UniqueName: \"kubernetes.io/projected/4977eaff-2a0a-4dac-8d85-bc2a207254cb-kube-api-access-vbxb5\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317047 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d8fec-8229-4904-9fc3-178914884ea0-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-m24c2\" (UID: \"ca1d8fec-8229-4904-9fc3-178914884ea0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317065 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8649ca07-00c1-4783-ba8c-3ab66f168149-trusted-ca\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: 
\"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317091 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7fg2\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-kube-api-access-m7fg2\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317111 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khmtz\" (UniqueName: \"kubernetes.io/projected/a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab-kube-api-access-khmtz\") pod \"machine-config-server-rv7nt\" (UID: \"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab\") " pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317127 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73c987cc-2f81-4c56-a41d-1f8c9d601fa9-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bf469\" (UID: \"73c987cc-2f81-4c56-a41d-1f8c9d601fa9\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317145 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99vn7\" (UniqueName: \"kubernetes.io/projected/2794d648-d3ac-4540-85be-c80a1997ff1c-kube-api-access-99vn7\") pod \"olm-operator-6b444d44fb-vqqsc\" (UID: \"2794d648-d3ac-4540-85be-c80a1997ff1c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317165 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/139e3088-813e-4ac6-8145-41aaf955cce6-config-volume\") pod \"dns-default-lsnw8\" (UID: \"139e3088-813e-4ac6-8145-41aaf955cce6\") " pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317216 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f1ca18ee-d4fc-4e20-9719-7647edaa6296-metrics-tls\") pod \"dns-operator-744455d44c-crmsz\" (UID: \"f1ca18ee-d4fc-4e20-9719-7647edaa6296\") " pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317231 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/108bb116-2d8b-4e5c-90fa-fa22239d68ac-serving-cert\") pod \"service-ca-operator-777779d784-bnmtn\" (UID: \"108bb116-2d8b-4e5c-90fa-fa22239d68ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317247 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d555l\" (UniqueName: \"kubernetes.io/projected/97f2dec2-800d-4308-9df6-61f5f94f2393-kube-api-access-d555l\") pod \"migrator-59844c95c7-xsjfr\" (UID: \"97f2dec2-800d-4308-9df6-61f5f94f2393\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr" Nov 29 04:13:41 crc kubenswrapper[4631]: 
I1129 04:13:41.317264 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lt9h8\" (UniqueName: \"kubernetes.io/projected/8649ca07-00c1-4783-ba8c-3ab66f168149-kube-api-access-lt9h8\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317281 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/98724a81-608d-48f1-b876-e4354b2ff65d-trusted-ca\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317299 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfffv\" (UniqueName: \"kubernetes.io/projected/d9c7d795-834c-498b-a012-b4d48c277f9c-kube-api-access-wfffv\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317347 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/171d32d8-1dcb-497d-9724-d798414c5602-installation-pull-secrets\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317363 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2794d648-d3ac-4540-85be-c80a1997ff1c-srv-cert\") pod \"olm-operator-6b444d44fb-vqqsc\" (UID: \"2794d648-d3ac-4540-85be-c80a1997ff1c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317380 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/171d32d8-1dcb-497d-9724-d798414c5602-ca-trust-extracted\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317386 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2nwp\" (UniqueName: \"kubernetes.io/projected/8e76293e-e7ed-4b04-8941-d41fe405c987-kube-api-access-t2nwp\") pod \"catalog-operator-68c6474976-l66gm\" (UID: \"8e76293e-e7ed-4b04-8941-d41fe405c987\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317444 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-trusted-ca\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.316671 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/c2efa169-6700-4901-a36d-cdba29de5269-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-hckm8\" (UID: \"c2efa169-6700-4901-a36d-cdba29de5269\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317496 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-registry-tls\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317514 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnlkq\" (UniqueName: \"kubernetes.io/projected/7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a-kube-api-access-vnlkq\") pod \"service-ca-9c57cc56f-xwkb8\" (UID: \"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a\") " pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317535 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvbr9\" (UniqueName: \"kubernetes.io/projected/9255468d-b26e-412e-84fd-ad5a94279720-kube-api-access-wvbr9\") pod \"cluster-samples-operator-665b6dd947-x67rw\" (UID: \"9255468d-b26e-412e-84fd-ad5a94279720\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317557 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d9c7d795-834c-498b-a012-b4d48c277f9c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317577 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/59dfd767-fdf5-4034-85db-6ae66566ee59-apiservice-cert\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317617 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317635 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/98724a81-608d-48f1-b876-e4354b2ff65d-serving-cert\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317683 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/78552157-0b5f-437a-988a-71805a812ab2-secret-volume\") pod \"collect-profiles-29406480-7l9nf\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317700 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d9c7d795-834c-498b-a012-b4d48c277f9c-images\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317716 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab-certs\") pod \"machine-config-server-rv7nt\" (UID: \"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab\") " pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317765 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnqcw\" (UniqueName: \"kubernetes.io/projected/679c6cbe-89a8-458a-a798-5f70ef026702-kube-api-access-cnqcw\") pod \"package-server-manager-789f6589d5-pspvc\" (UID: \"679c6cbe-89a8-458a-a798-5f70ef026702\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317788 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d9c7d795-834c-498b-a012-b4d48c277f9c-proxy-tls\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317817 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szpw2\" (UniqueName: \"kubernetes.io/projected/c2efa169-6700-4901-a36d-cdba29de5269-kube-api-access-szpw2\") pod \"openshift-controller-manager-operator-756b6f6bc6-hckm8\" (UID: \"c2efa169-6700-4901-a36d-cdba29de5269\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317834 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qkbf\" (UniqueName: \"kubernetes.io/projected/96b28de4-d863-4d8a-86fc-f55986dde2cc-kube-api-access-2qkbf\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317852 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wfn9\" (UniqueName: \"kubernetes.io/projected/98724a81-608d-48f1-b876-e4354b2ff65d-kube-api-access-5wfn9\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317879 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/96b28de4-d863-4d8a-86fc-f55986dde2cc-etcd-service-ca\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317894 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8e76293e-e7ed-4b04-8941-d41fe405c987-srv-cert\") pod \"catalog-operator-68c6474976-l66gm\" (UID: \"8e76293e-e7ed-4b04-8941-d41fe405c987\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317910 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-socket-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317929 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96b28de4-d863-4d8a-86fc-f55986dde2cc-config\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317957 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/139e3088-813e-4ac6-8145-41aaf955cce6-metrics-tls\") pod \"dns-default-lsnw8\" (UID: \"139e3088-813e-4ac6-8145-41aaf955cce6\") " pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317971 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/59dfd767-fdf5-4034-85db-6ae66566ee59-webhook-cert\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.318000 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6kbj\" (UniqueName: \"kubernetes.io/projected/108bb116-2d8b-4e5c-90fa-fa22239d68ac-kube-api-access-k6kbj\") pod \"service-ca-operator-777779d784-bnmtn\" (UID: \"108bb116-2d8b-4e5c-90fa-fa22239d68ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.318030 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-plugins-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.318048 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72qjg\" (UniqueName: \"kubernetes.io/projected/ca1d8fec-8229-4904-9fc3-178914884ea0-kube-api-access-72qjg\") pod \"kube-storage-version-migrator-operator-b67b599dd-m24c2\" (UID: \"ca1d8fec-8229-4904-9fc3-178914884ea0\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.318065 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/108bb116-2d8b-4e5c-90fa-fa22239d68ac-config\") pod \"service-ca-operator-777779d784-bnmtn\" (UID: \"108bb116-2d8b-4e5c-90fa-fa22239d68ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.318081 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c87c11b9-cb13-4548-b9f3-f40b9962b739-cert\") pod \"ingress-canary-59sqc\" (UID: \"c87c11b9-cb13-4548-b9f3-f40b9962b739\") " pod="openshift-ingress-canary/ingress-canary-59sqc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.318095 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/78552157-0b5f-437a-988a-71805a812ab2-config-volume\") pod \"collect-profiles-29406480-7l9nf\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.318115 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/96b28de4-d863-4d8a-86fc-f55986dde2cc-etcd-client\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.318148 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/96b28de4-d863-4d8a-86fc-f55986dde2cc-etcd-ca\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.318167 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8649ca07-00c1-4783-ba8c-3ab66f168149-metrics-tls\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.319656 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-trusted-ca\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.321140 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/98724a81-608d-48f1-b876-e4354b2ff65d-trusted-ca\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.322660 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/108bb116-2d8b-4e5c-90fa-fa22239d68ac-serving-cert\") pod \"service-ca-operator-777779d784-bnmtn\" (UID: \"108bb116-2d8b-4e5c-90fa-fa22239d68ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.322750 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3be4bf0e-d446-4474-900c-0a5aec6450a5-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qrnbr\" (UID: \"3be4bf0e-d446-4474-900c-0a5aec6450a5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.323585 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2794d648-d3ac-4540-85be-c80a1997ff1c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vqqsc\" (UID: \"2794d648-d3ac-4540-85be-c80a1997ff1c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.323742 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:41.823725214 +0000 UTC m=+148.888228728 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.324834 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d8fec-8229-4904-9fc3-178914884ea0-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-m24c2\" (UID: \"ca1d8fec-8229-4904-9fc3-178914884ea0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.326074 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8649ca07-00c1-4783-ba8c-3ab66f168149-trusted-ca\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.326907 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2efa169-6700-4901-a36d-cdba29de5269-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-hckm8\" (UID: \"c2efa169-6700-4901-a36d-cdba29de5269\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.327752 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8649ca07-00c1-4783-ba8c-3ab66f168149-metrics-tls\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: 
\"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.329179 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/96b28de4-d863-4d8a-86fc-f55986dde2cc-etcd-service-ca\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.329213 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a-signing-key\") pod \"service-ca-9c57cc56f-xwkb8\" (UID: \"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a\") " pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.329619 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96b28de4-d863-4d8a-86fc-f55986dde2cc-config\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.330322 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/108bb116-2d8b-4e5c-90fa-fa22239d68ac-config\") pod \"service-ca-operator-777779d784-bnmtn\" (UID: \"108bb116-2d8b-4e5c-90fa-fa22239d68ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.317363 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-registry-certificates\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.331453 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d8fec-8229-4904-9fc3-178914884ea0-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-m24c2\" (UID: \"ca1d8fec-8229-4904-9fc3-178914884ea0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.331998 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/96b28de4-d863-4d8a-86fc-f55986dde2cc-etcd-ca\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.332256 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98724a81-608d-48f1-b876-e4354b2ff65d-config\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.333080 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-registry-tls\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.333538 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/679c6cbe-89a8-458a-a798-5f70ef026702-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-pspvc\" (UID: \"679c6cbe-89a8-458a-a798-5f70ef026702\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.333637 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2794d648-d3ac-4540-85be-c80a1997ff1c-srv-cert\") pod \"olm-operator-6b444d44fb-vqqsc\" (UID: \"2794d648-d3ac-4540-85be-c80a1997ff1c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.334750 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f1ca18ee-d4fc-4e20-9719-7647edaa6296-metrics-tls\") pod \"dns-operator-744455d44c-crmsz\" (UID: \"f1ca18ee-d4fc-4e20-9719-7647edaa6296\") " pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.347925 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/98724a81-608d-48f1-b876-e4354b2ff65d-serving-cert\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.350801 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/96b28de4-d863-4d8a-86fc-f55986dde2cc-etcd-client\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.351080 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/96b28de4-d863-4d8a-86fc-f55986dde2cc-serving-cert\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.351511 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/9255468d-b26e-412e-84fd-ad5a94279720-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-x67rw\" (UID: \"9255468d-b26e-412e-84fd-ad5a94279720\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.351846 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/171d32d8-1dcb-497d-9724-d798414c5602-installation-pull-secrets\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc 
kubenswrapper[4631]: I1129 04:13:41.354765 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8649ca07-00c1-4783-ba8c-3ab66f168149-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.369764 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-bound-sa-token\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.386564 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3be4bf0e-d446-4474-900c-0a5aec6450a5-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qrnbr\" (UID: \"3be4bf0e-d446-4474-900c-0a5aec6450a5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.405854 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b92fj\" (UniqueName: \"kubernetes.io/projected/f1ca18ee-d4fc-4e20-9719-7647edaa6296-kube-api-access-b92fj\") pod \"dns-operator-744455d44c-crmsz\" (UID: \"f1ca18ee-d4fc-4e20-9719-7647edaa6296\") " pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.419562 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.419724 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d9c7d795-834c-498b-a012-b4d48c277f9c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.419748 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/59dfd767-fdf5-4034-85db-6ae66566ee59-apiservice-cert\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.419780 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/78552157-0b5f-437a-988a-71805a812ab2-secret-volume\") pod \"collect-profiles-29406480-7l9nf\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.419799 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/d9c7d795-834c-498b-a012-b4d48c277f9c-images\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.419812 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab-certs\") pod \"machine-config-server-rv7nt\" (UID: \"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab\") " pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.419839 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d9c7d795-834c-498b-a012-b4d48c277f9c-proxy-tls\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420153 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8e76293e-e7ed-4b04-8941-d41fe405c987-srv-cert\") pod \"catalog-operator-68c6474976-l66gm\" (UID: \"8e76293e-e7ed-4b04-8941-d41fe405c987\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420178 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-socket-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420204 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/139e3088-813e-4ac6-8145-41aaf955cce6-metrics-tls\") pod \"dns-default-lsnw8\" (UID: \"139e3088-813e-4ac6-8145-41aaf955cce6\") " pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420222 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/59dfd767-fdf5-4034-85db-6ae66566ee59-webhook-cert\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420246 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-plugins-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420260 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/78552157-0b5f-437a-988a-71805a812ab2-config-volume\") pod \"collect-profiles-29406480-7l9nf\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420280 4631 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c87c11b9-cb13-4548-b9f3-f40b9962b739-cert\") pod \"ingress-canary-59sqc\" (UID: \"c87c11b9-cb13-4548-b9f3-f40b9962b739\") " pod="openshift-ingress-canary/ingress-canary-59sqc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420302 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/59dfd767-fdf5-4034-85db-6ae66566ee59-tmpfs\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420318 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kwszg\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420352 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp4ss\" (UniqueName: \"kubernetes.io/projected/c87c11b9-cb13-4548-b9f3-f40b9962b739-kube-api-access-wp4ss\") pod \"ingress-canary-59sqc\" (UID: \"c87c11b9-cb13-4548-b9f3-f40b9962b739\") " pod="openshift-ingress-canary/ingress-canary-59sqc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420375 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab-node-bootstrap-token\") pod \"machine-config-server-rv7nt\" (UID: \"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab\") " pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420392 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5v2p\" (UniqueName: \"kubernetes.io/projected/78552157-0b5f-437a-988a-71805a812ab2-kube-api-access-p5v2p\") pod \"collect-profiles-29406480-7l9nf\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420415 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2tvl\" (UniqueName: \"kubernetes.io/projected/139e3088-813e-4ac6-8145-41aaf955cce6-kube-api-access-d2tvl\") pod \"dns-default-lsnw8\" (UID: \"139e3088-813e-4ac6-8145-41aaf955cce6\") " pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420437 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jg85\" (UniqueName: \"kubernetes.io/projected/59dfd767-fdf5-4034-85db-6ae66566ee59-kube-api-access-2jg85\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420459 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8e76293e-e7ed-4b04-8941-d41fe405c987-profile-collector-cert\") pod \"catalog-operator-68c6474976-l66gm\" (UID: \"8e76293e-e7ed-4b04-8941-d41fe405c987\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420481 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl7jm\" (UniqueName: \"kubernetes.io/projected/73c987cc-2f81-4c56-a41d-1f8c9d601fa9-kube-api-access-tl7jm\") pod \"machine-config-controller-84d6567774-bf469\" (UID: \"73c987cc-2f81-4c56-a41d-1f8c9d601fa9\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420510 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-registration-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420533 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/73c987cc-2f81-4c56-a41d-1f8c9d601fa9-proxy-tls\") pod \"machine-config-controller-84d6567774-bf469\" (UID: \"73c987cc-2f81-4c56-a41d-1f8c9d601fa9\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420552 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl95w\" (UniqueName: \"kubernetes.io/projected/dba614c7-ceae-4ce5-afb6-6d082156f640-kube-api-access-pl95w\") pod \"marketplace-operator-79b997595-kwszg\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420565 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-mountpoint-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420582 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kwszg\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420596 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-csi-data-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420610 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbxb5\" (UniqueName: \"kubernetes.io/projected/4977eaff-2a0a-4dac-8d85-bc2a207254cb-kube-api-access-vbxb5\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420635 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-khmtz\" (UniqueName: \"kubernetes.io/projected/a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab-kube-api-access-khmtz\") pod \"machine-config-server-rv7nt\" (UID: \"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab\") " pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420652 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73c987cc-2f81-4c56-a41d-1f8c9d601fa9-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bf469\" (UID: \"73c987cc-2f81-4c56-a41d-1f8c9d601fa9\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420673 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/139e3088-813e-4ac6-8145-41aaf955cce6-config-volume\") pod \"dns-default-lsnw8\" (UID: \"139e3088-813e-4ac6-8145-41aaf955cce6\") " pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420703 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfffv\" (UniqueName: \"kubernetes.io/projected/d9c7d795-834c-498b-a012-b4d48c277f9c-kube-api-access-wfffv\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.420719 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2nwp\" (UniqueName: \"kubernetes.io/projected/8e76293e-e7ed-4b04-8941-d41fe405c987-kube-api-access-t2nwp\") pod \"catalog-operator-68c6474976-l66gm\" (UID: \"8e76293e-e7ed-4b04-8941-d41fe405c987\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.421026 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:41.921011463 +0000 UTC m=+148.985514977 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.421687 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d9c7d795-834c-498b-a012-b4d48c277f9c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.426452 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/59dfd767-fdf5-4034-85db-6ae66566ee59-tmpfs\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.426590 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-plugins-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.427253 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/78552157-0b5f-437a-988a-71805a812ab2-config-volume\") pod \"collect-profiles-29406480-7l9nf\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.429236 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/78552157-0b5f-437a-988a-71805a812ab2-secret-volume\") pod \"collect-profiles-29406480-7l9nf\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.429783 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-registration-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.430181 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kwszg\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.432463 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-mountpoint-dir\") pod 
\"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.432670 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-csi-data-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.432704 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/4977eaff-2a0a-4dac-8d85-bc2a207254cb-socket-dir\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.432932 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/59dfd767-fdf5-4034-85db-6ae66566ee59-webhook-cert\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.433171 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d9c7d795-834c-498b-a012-b4d48c277f9c-images\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.434286 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/139e3088-813e-4ac6-8145-41aaf955cce6-config-volume\") pod \"dns-default-lsnw8\" (UID: \"139e3088-813e-4ac6-8145-41aaf955cce6\") " pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.434748 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/59dfd767-fdf5-4034-85db-6ae66566ee59-apiservice-cert\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.435354 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/73c987cc-2f81-4c56-a41d-1f8c9d601fa9-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bf469\" (UID: \"73c987cc-2f81-4c56-a41d-1f8c9d601fa9\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.436635 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8e76293e-e7ed-4b04-8941-d41fe405c987-profile-collector-cert\") pod \"catalog-operator-68c6474976-l66gm\" (UID: \"8e76293e-e7ed-4b04-8941-d41fe405c987\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.444553 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.453041 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/73c987cc-2f81-4c56-a41d-1f8c9d601fa9-proxy-tls\") pod \"machine-config-controller-84d6567774-bf469\" (UID: \"73c987cc-2f81-4c56-a41d-1f8c9d601fa9\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.453137 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d9c7d795-834c-498b-a012-b4d48c277f9c-proxy-tls\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.456881 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99vn7\" (UniqueName: \"kubernetes.io/projected/2794d648-d3ac-4540-85be-c80a1997ff1c-kube-api-access-99vn7\") pod \"olm-operator-6b444d44fb-vqqsc\" (UID: \"2794d648-d3ac-4540-85be-c80a1997ff1c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.457223 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/139e3088-813e-4ac6-8145-41aaf955cce6-metrics-tls\") pod \"dns-default-lsnw8\" (UID: \"139e3088-813e-4ac6-8145-41aaf955cce6\") " pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.457961 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8e76293e-e7ed-4b04-8941-d41fe405c987-srv-cert\") pod \"catalog-operator-68c6474976-l66gm\" (UID: \"8e76293e-e7ed-4b04-8941-d41fe405c987\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.457978 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c87c11b9-cb13-4548-b9f3-f40b9962b739-cert\") pod \"ingress-canary-59sqc\" (UID: \"c87c11b9-cb13-4548-b9f3-f40b9962b739\") " pod="openshift-ingress-canary/ingress-canary-59sqc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.458814 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kwszg\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.459145 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab-certs\") pod \"machine-config-server-rv7nt\" (UID: \"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab\") " pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.459700 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab-node-bootstrap-token\") pod \"machine-config-server-rv7nt\" (UID: \"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab\") " pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.474790 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-mljqh"] Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.475740 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lt9h8\" (UniqueName: \"kubernetes.io/projected/8649ca07-00c1-4783-ba8c-3ab66f168149-kube-api-access-lt9h8\") pod \"ingress-operator-5b745b69d9-xvxpc\" (UID: \"8649ca07-00c1-4783-ba8c-3ab66f168149\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.480346 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d555l\" (UniqueName: \"kubernetes.io/projected/97f2dec2-800d-4308-9df6-61f5f94f2393-kube-api-access-d555l\") pod \"migrator-59844c95c7-xsjfr\" (UID: \"97f2dec2-800d-4308-9df6-61f5f94f2393\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.481368 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt"] Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.484148 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-mbmnz"] Nov 29 04:13:41 crc kubenswrapper[4631]: W1129 04:13:41.487596 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode4d8dab1_39ad_4c93_a452_5ecf8afda237.slice/crio-47ba2c3911c50fe097228d3c7eba3d15b78d76879733997551114e3d216455b5 WatchSource:0}: Error finding container 47ba2c3911c50fe097228d3c7eba3d15b78d76879733997551114e3d216455b5: Status 404 returned error can't find the container with id 47ba2c3911c50fe097228d3c7eba3d15b78d76879733997551114e3d216455b5 Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.501195 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.502708 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnlkq\" (UniqueName: \"kubernetes.io/projected/7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a-kube-api-access-vnlkq\") pod \"service-ca-9c57cc56f-xwkb8\" (UID: \"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a\") " pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.524856 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.525273 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-29 04:13:42.025260023 +0000 UTC m=+149.089763537 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.525577 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.542963 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvbr9\" (UniqueName: \"kubernetes.io/projected/9255468d-b26e-412e-84fd-ad5a94279720-kube-api-access-wvbr9\") pod \"cluster-samples-operator-665b6dd947-x67rw\" (UID: \"9255468d-b26e-412e-84fd-ad5a94279720\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.558948 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnqcw\" (UniqueName: \"kubernetes.io/projected/679c6cbe-89a8-458a-a798-5f70ef026702-kube-api-access-cnqcw\") pod \"package-server-manager-789f6589d5-pspvc\" (UID: \"679c6cbe-89a8-458a-a798-5f70ef026702\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.575046 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.575743 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7fg2\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-kube-api-access-m7fg2\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.577288 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.582790 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.588907 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qkbf\" (UniqueName: \"kubernetes.io/projected/96b28de4-d863-4d8a-86fc-f55986dde2cc-kube-api-access-2qkbf\") pod \"etcd-operator-b45778765-tfz6d\" (UID: \"96b28de4-d863-4d8a-86fc-f55986dde2cc\") " pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.591988 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wfn9\" (UniqueName: \"kubernetes.io/projected/98724a81-608d-48f1-b876-e4354b2ff65d-kube-api-access-5wfn9\") pod \"console-operator-58897d9998-4l8kb\" (UID: \"98724a81-608d-48f1-b876-e4354b2ff65d\") " pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.612157 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6kbj\" (UniqueName: \"kubernetes.io/projected/108bb116-2d8b-4e5c-90fa-fa22239d68ac-kube-api-access-k6kbj\") pod \"service-ca-operator-777779d784-bnmtn\" (UID: \"108bb116-2d8b-4e5c-90fa-fa22239d68ac\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.627459 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.628222 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.128193062 +0000 UTC m=+149.192696566 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.647204 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72qjg\" (UniqueName: \"kubernetes.io/projected/ca1d8fec-8229-4904-9fc3-178914884ea0-kube-api-access-72qjg\") pod \"kube-storage-version-migrator-operator-b67b599dd-m24c2\" (UID: \"ca1d8fec-8229-4904-9fc3-178914884ea0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.667843 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szpw2\" (UniqueName: \"kubernetes.io/projected/c2efa169-6700-4901-a36d-cdba29de5269-kube-api-access-szpw2\") pod \"openshift-controller-manager-operator-756b6f6bc6-hckm8\" (UID: \"c2efa169-6700-4901-a36d-cdba29de5269\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.700229 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2nwp\" (UniqueName: \"kubernetes.io/projected/8e76293e-e7ed-4b04-8941-d41fe405c987-kube-api-access-t2nwp\") pod \"catalog-operator-68c6474976-l66gm\" (UID: \"8e76293e-e7ed-4b04-8941-d41fe405c987\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.728106 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5v2p\" (UniqueName: \"kubernetes.io/projected/78552157-0b5f-437a-988a-71805a812ab2-kube-api-access-p5v2p\") pod \"collect-profiles-29406480-7l9nf\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.728908 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.729209 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.229197362 +0000 UTC m=+149.293700876 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.729469 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2tvl\" (UniqueName: \"kubernetes.io/projected/139e3088-813e-4ac6-8145-41aaf955cce6-kube-api-access-d2tvl\") pod \"dns-default-lsnw8\" (UID: \"139e3088-813e-4ac6-8145-41aaf955cce6\") " pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.751856 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.773285 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jg85\" (UniqueName: \"kubernetes.io/projected/59dfd767-fdf5-4034-85db-6ae66566ee59-kube-api-access-2jg85\") pod \"packageserver-d55dfcdfc-2jd8q\" (UID: \"59dfd767-fdf5-4034-85db-6ae66566ee59\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.773952 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.785077 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.786597 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl7jm\" (UniqueName: \"kubernetes.io/projected/73c987cc-2f81-4c56-a41d-1f8c9d601fa9-kube-api-access-tl7jm\") pod \"machine-config-controller-84d6567774-bf469\" (UID: \"73c987cc-2f81-4c56-a41d-1f8c9d601fa9\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.787155 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.808619 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp4ss\" (UniqueName: \"kubernetes.io/projected/c87c11b9-cb13-4548-b9f3-f40b9962b739-kube-api-access-wp4ss\") pod \"ingress-canary-59sqc\" (UID: \"c87c11b9-cb13-4548-b9f3-f40b9962b739\") " pod="openshift-ingress-canary/ingress-canary-59sqc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.828367 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl95w\" (UniqueName: \"kubernetes.io/projected/dba614c7-ceae-4ce5-afb6-6d082156f640-kube-api-access-pl95w\") pod \"marketplace-operator-79b997595-kwszg\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.828830 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.830351 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.839800 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.839972 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.339936843 +0000 UTC m=+149.404440347 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.840294 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.845141 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khmtz\" (UniqueName: \"kubernetes.io/projected/a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab-kube-api-access-khmtz\") pod \"machine-config-server-rv7nt\" (UID: \"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab\") " pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.852229 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.869267 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbxb5\" (UniqueName: \"kubernetes.io/projected/4977eaff-2a0a-4dac-8d85-bc2a207254cb-kube-api-access-vbxb5\") pod \"csi-hostpathplugin-zsmnf\" (UID: \"4977eaff-2a0a-4dac-8d85-bc2a207254cb\") " pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.892960 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.897453 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.915529 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.915815 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfffv\" (UniqueName: \"kubernetes.io/projected/d9c7d795-834c-498b-a012-b4d48c277f9c-kube-api-access-wfffv\") pod \"machine-config-operator-74547568cd-fqvnm\" (UID: \"d9c7d795-834c-498b-a012-b4d48c277f9c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.926888 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.927981 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.947312 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5"] Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.948849 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.949262 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.962118 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-59sqc" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.980982 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-lp9sb"] Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.981220 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-lsnw8" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.981671 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" Nov 29 04:13:41 crc kubenswrapper[4631]: I1129 04:13:41.987529 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rv7nt" Nov 29 04:13:41 crc kubenswrapper[4631]: E1129 04:13:41.988364 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.488314712 +0000 UTC m=+149.552818226 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.049596 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.050089 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.550075244 +0000 UTC m=+149.614578748 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.090745 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" event={"ID":"86a965f6-f0d3-4745-a0fb-919f82d6159b","Type":"ContainerStarted","Data":"f601cb08d491250ba0b01e9c5662c8ed778df358459e991f3207bfdc1dcdb27b"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.090782 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" event={"ID":"86a965f6-f0d3-4745-a0fb-919f82d6159b","Type":"ContainerStarted","Data":"0005cb753845549b01d5a0169d013ba87db5ccfea211c3f8361efe3a87e5dae1"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.109920 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" event={"ID":"2099834e-bb35-49e5-b94b-06cf1d172cb2","Type":"ContainerStarted","Data":"822206c9e32161dc032c34a19f96c9e00865401ff63d0b9c88415503122c10fa"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.109962 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" event={"ID":"2099834e-bb35-49e5-b94b-06cf1d172cb2","Type":"ContainerStarted","Data":"27a2a226305dc9915f556bdf9f0f3e8f57175d25304f1a94b6b42565e1f6f08b"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.110307 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.131363 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.143430 
4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" event={"ID":"b1d2be36-2bd7-4b11-a4bc-111dde998ffb","Type":"ContainerStarted","Data":"d23363691dd28415fab77aa7465b0aa69077cc1292264dc0b7d1a34f0bd095ab"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.145291 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" event={"ID":"2de073f0-8f78-4378-9df5-758e30e7b896","Type":"ContainerStarted","Data":"2c558dcf4d580822affd71babcbe7b6bf845036497b802308b6f3fad59342355"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.145320 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" event={"ID":"2de073f0-8f78-4378-9df5-758e30e7b896","Type":"ContainerStarted","Data":"153da4b3b850a49f782f781cf4779510cf5def048ca932f7f1bc394a0257d3fe"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.167949 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.168856 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.668836333 +0000 UTC m=+149.733339847 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.171056 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" event={"ID":"41cf001b-6d25-483e-ad01-03a028926fff","Type":"ContainerStarted","Data":"5223ff62d454b6ca5ec093010558f992caccda4d8495e0ca5e09189af355ce56"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.227449 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v"] Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.258005 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" event={"ID":"5e9aad10-398a-479a-b828-100682ad67c7","Type":"ContainerStarted","Data":"936aa887b0cde33df3117f8fac6294fe828499f5b3a5eec8edc6d94edac2a6af"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.258047 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" event={"ID":"5e9aad10-398a-479a-b828-100682ad67c7","Type":"ContainerStarted","Data":"15bf744a70a85a80ff93d62d3d1905ebc1a15499c61c51ccd2dc113e00b4eb5d"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.268807 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.268957 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.768933611 +0000 UTC m=+149.833437125 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.269077 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.269800 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.769790402 +0000 UTC m=+149.834293916 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.272808 4631 generic.go:334] "Generic (PLEG): container finished" podID="4e7b7b24-4b23-4ae5-842f-73e826f944d2" containerID="4053fc5ce76545fb966a903aa9978dca15ea8f7297e8fe5f7c795a8d13e5bbef" exitCode=0 Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.272871 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" event={"ID":"4e7b7b24-4b23-4ae5-842f-73e826f944d2","Type":"ContainerDied","Data":"4053fc5ce76545fb966a903aa9978dca15ea8f7297e8fe5f7c795a8d13e5bbef"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.288232 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-pbcfc" event={"ID":"e4d8dab1-39ad-4c93-a452-5ecf8afda237","Type":"ContainerStarted","Data":"47ba2c3911c50fe097228d3c7eba3d15b78d76879733997551114e3d216455b5"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.302460 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-896b9" event={"ID":"002a7abb-c9ed-4ae8-92da-b4985ff0643c","Type":"ContainerStarted","Data":"2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.302499 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-896b9" event={"ID":"002a7abb-c9ed-4ae8-92da-b4985ff0643c","Type":"ContainerStarted","Data":"c5f1eee8e998cf194d47953fb607a26423551858ac5870ba2efe7138fc2e75c7"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.304044 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-r9xmz" 
event={"ID":"01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8","Type":"ContainerStarted","Data":"7b5187f56e549c5ce7f0b5d662b474e6f9c4206bbf639728db54d180d2485cb2"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.304064 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-r9xmz" event={"ID":"01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8","Type":"ContainerStarted","Data":"bd312b3479fc14e4e39f17d5803a08d7d26d3ddb0b645822425df023dad54612"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.305150 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-r9xmz" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.312983 4631 patch_prober.go:28] interesting pod/downloads-7954f5f757-r9xmz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.313022 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r9xmz" podUID="01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.330559 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" event={"ID":"0619cd40-96ab-4a00-b716-d7538b375a81","Type":"ContainerStarted","Data":"d24404931754d778fccb72b4da59613416254fac05b9fb402ef63b32975a26d4"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.345626 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" event={"ID":"9ed87add-9fae-43f8-acf1-e8b425d9afee","Type":"ContainerStarted","Data":"24127abc84f50b0e8edb2c94aa58ec4d0508680cabbf4cb732d0a853ddde988b"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.346537 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.347631 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x"] Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.358491 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" event={"ID":"7f223354-db6f-4227-9e64-39c01f942b11","Type":"ContainerStarted","Data":"8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.358525 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" event={"ID":"7f223354-db6f-4227-9e64-39c01f942b11","Type":"ContainerStarted","Data":"298a19d5de35ace548b2298314e4402300dc175607a950369c24282f53d9cfce"} Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.359155 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.361716 4631 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-swnf5 container/oauth-openshift 
namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.361774 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.370863 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.371723 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" event={"ID":"386dd0c3-88ce-4690-978a-0ecd6f029d5c","Type":"ContainerStarted","Data":"1a82d52e3d1840ca40a7ccdb201497eb22327bdd81ff4a629b678be65ce13d3d"} Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.372264 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.872246909 +0000 UTC m=+149.936750413 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.387605 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.464782 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr"] Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.473846 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.477228 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:42.977217667 +0000 UTC m=+150.041721181 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.498397 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr"] Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.547008 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-crmsz"] Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.575644 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc"] Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.576054 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.576213 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.076180697 +0000 UTC m=+150.140684211 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.576315 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.576765 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.076755881 +0000 UTC m=+150.141259395 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: W1129 04:13:42.609577 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48e794c9_0aa9_42e3_94d2_27e97f141d7c.slice/crio-0cd7685398fd76b134309a5f0a4dfd8af09644f4fb7dc4703c31969db28b8fbc WatchSource:0}: Error finding container 0cd7685398fd76b134309a5f0a4dfd8af09644f4fb7dc4703c31969db28b8fbc: Status 404 returned error can't find the container with id 0cd7685398fd76b134309a5f0a4dfd8af09644f4fb7dc4703c31969db28b8fbc Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.678629 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.679118 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.179100565 +0000 UTC m=+150.243604079 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: W1129 04:13:42.727671 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97f2dec2_800d_4308_9df6_61f5f94f2393.slice/crio-ae3dd3adbcb6f13c1b473ff2e607b5be2b6750c0bf285e03216059be7182549a WatchSource:0}: Error finding container ae3dd3adbcb6f13c1b473ff2e607b5be2b6750c0bf285e03216059be7182549a: Status 404 returned error can't find the container with id ae3dd3adbcb6f13c1b473ff2e607b5be2b6750c0bf285e03216059be7182549a Nov 29 04:13:42 crc kubenswrapper[4631]: W1129 04:13:42.750656 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1ca18ee_d4fc_4e20_9719_7647edaa6296.slice/crio-0d2e00cab7687e0eb30e37ed4faf4c99a851ace78ebbda06aa826f2abf86408a WatchSource:0}: Error finding container 0d2e00cab7687e0eb30e37ed4faf4c99a851ace78ebbda06aa826f2abf86408a: Status 404 returned error can't find the container with id 0d2e00cab7687e0eb30e37ed4faf4c99a851ace78ebbda06aa826f2abf86408a Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.791632 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.791962 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.291950938 +0000 UTC m=+150.356454452 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.867145 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" podStartSLOduration=131.867121751 podStartE2EDuration="2m11.867121751s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:42.86668227 +0000 UTC m=+149.931185784" watchObservedRunningTime="2025-11-29 04:13:42.867121751 +0000 UTC m=+149.931625265" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.892560 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:42 crc kubenswrapper[4631]: E1129 04:13:42.892838 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.392823805 +0000 UTC m=+150.457327309 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.921204 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-xwkb8"] Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.947523 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" podStartSLOduration=130.947509643 podStartE2EDuration="2m10.947509643s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:42.945974876 +0000 UTC m=+150.010478400" watchObservedRunningTime="2025-11-29 04:13:42.947509643 +0000 UTC m=+150.012013157" Nov 29 04:13:42 crc kubenswrapper[4631]: I1129 04:13:42.998970 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:42.999307 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.49929602 +0000 UTC m=+150.563799534 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.082698 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4l8kb"] Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.103748 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:43.104132 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.604117835 +0000 UTC m=+150.668621349 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.121228 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-6s252" podStartSLOduration=132.121207396 podStartE2EDuration="2m12.121207396s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:43.095637266 +0000 UTC m=+150.160140780" watchObservedRunningTime="2025-11-29 04:13:43.121207396 +0000 UTC m=+150.185710910" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.157478 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-tfz6d"] Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.202816 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn"] Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.207140 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:43.207647 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.707631188 +0000 UTC m=+150.772134702 (durationBeforeRetry 500ms). 
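
The repeating "No retries permitted until ... (durationBeforeRetry 500ms)" lines show the kubelet's pending-operation bookkeeping: a failed volume operation records an earliest-retry deadline, and the reconciler's frequent passes are turned away until that deadline expires. A simplified sketch of the deadline pattern, assuming the 500ms initial backoff seen here; this is not the kubelet's actual nestedpendingoperations implementation:

package main

import (
    "fmt"
    "time"
)

// op tracks one pending volume operation with a backoff deadline.
type op struct {
    backoff   time.Duration // durationBeforeRetry in the log
    notBefore time.Time     // "No retries permitted until ..."
}

func (o *op) fail(now time.Time) {
    if o.backoff == 0 {
        o.backoff = 500 * time.Millisecond // initial value seen in these entries
    } else {
        o.backoff *= 2 // the real kubelet grows and caps this on repeated failures
    }
    o.notBefore = now.Add(o.backoff)
}

func (o *op) tryStart(now time.Time) error {
    if now.Before(o.notBefore) {
        return fmt.Errorf("no retries permitted until %s (durationBeforeRetry %s)",
            o.notBefore.Format(time.RFC3339Nano), o.backoff)
    }
    return nil
}

func main() {
    var o op
    now := time.Now()
    o.fail(now) // first failure: MountDevice rejected, driver not registered
    if err := o.tryStart(now.Add(100 * time.Millisecond)); err != nil {
        fmt.Println(err) // reconciler retries too early and is turned away
    }
}
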
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: W1129 04:13:43.243541 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c82f5bd_7a32_42f8_9ff0_1d1aae08d66a.slice/crio-5d08c1d9939884959c278b7a2299c8add1971afc95f080fd5d56e3e41ce1c379 WatchSource:0}: Error finding container 5d08c1d9939884959c278b7a2299c8add1971afc95f080fd5d56e3e41ce1c379: Status 404 returned error can't find the container with id 5d08c1d9939884959c278b7a2299c8add1971afc95f080fd5d56e3e41ce1c379 Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.244840 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-r9xmz" podStartSLOduration=132.244822975 podStartE2EDuration="2m12.244822975s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:43.243757798 +0000 UTC m=+150.308261312" watchObservedRunningTime="2025-11-29 04:13:43.244822975 +0000 UTC m=+150.309326479" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.296498 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 04:13:43 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld Nov 29 04:13:43 crc kubenswrapper[4631]: [+]process-running ok Nov 29 04:13:43 crc kubenswrapper[4631]: healthz check failed Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.296830 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.299958 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-mbmnz" podStartSLOduration=132.299948244 podStartE2EDuration="2m12.299948244s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:43.299690158 +0000 UTC m=+150.364193672" watchObservedRunningTime="2025-11-29 04:13:43.299948244 +0000 UTC m=+150.364451758" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.311736 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:43.313155 4631 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.813132639 +0000 UTC m=+150.877636153 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.322890 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:43.323315 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:43.82330213 +0000 UTC m=+150.887805644 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.328872 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.396127 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-pbcfc" podStartSLOduration=132.396103985 podStartE2EDuration="2m12.396103985s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:43.35455651 +0000 UTC m=+150.419060024" watchObservedRunningTime="2025-11-29 04:13:43.396103985 +0000 UTC m=+150.460607499" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.431920 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:43.432426 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-29 04:13:43.9324113 +0000 UTC m=+150.996914814 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.471366 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-896b9" podStartSLOduration=132.47135199 podStartE2EDuration="2m12.47135199s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:43.470756026 +0000 UTC m=+150.535259540" watchObservedRunningTime="2025-11-29 04:13:43.47135199 +0000 UTC m=+150.535855504" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.532696 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" event={"ID":"b1d2be36-2bd7-4b11-a4bc-111dde998ffb","Type":"ContainerStarted","Data":"fb5de053a3756d87bd4b337494ba6c28ca88745a73ba697a41f82970132a5f58"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.532982 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:43.533283 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.033270917 +0000 UTC m=+151.097774421 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.560312 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" event={"ID":"8361abea-c6bc-4927-a88b-c8318096d60d","Type":"ContainerStarted","Data":"ca6978f2d9cf5f7b5d22118577a26f620a797ca95441a369d7cdce253e47898c"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.562030 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" podStartSLOduration=132.562021876 podStartE2EDuration="2m12.562021876s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:43.546852912 +0000 UTC m=+150.611356426" watchObservedRunningTime="2025-11-29 04:13:43.562021876 +0000 UTC m=+150.626525390" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.563097 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc"] Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.574589 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" event={"ID":"48e794c9-0aa9-42e3-94d2-27e97f141d7c","Type":"ContainerStarted","Data":"0cd7685398fd76b134309a5f0a4dfd8af09644f4fb7dc4703c31969db28b8fbc"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.625620 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"79a92550fd70c6451ba3f3be9eb4f8abbc3dfd7cce73f85d76e818e218d30872"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.633803 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:43.643930 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.143907235 +0000 UTC m=+151.208410749 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.659766 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc"] Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.663742 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" event={"ID":"0619cd40-96ab-4a00-b716-d7538b375a81","Type":"ContainerStarted","Data":"c16c3d41fa7e575625b2e13983247c099b0360d8f42e6a5b991b14190647d8a3"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.672009 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" event={"ID":"bc555ddf-f4df-4328-8396-c2ddaeb49ea0","Type":"ContainerStarted","Data":"bba30706dc38ab1db7b60bc4f45181003a6f56f4f3740d5dfe51e5fa5b82f7c3"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.692516 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" event={"ID":"2794d648-d3ac-4540-85be-c80a1997ff1c","Type":"ContainerStarted","Data":"5548d80c5fe01ae67c8e87fb1991d06395c9d3c7f7fdbdf8f47531a243e746fe"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.724682 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f681b4963755a3b35cd59efe682da8e023bea412e1af3f90dc4d88c4ffbe0d8f"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.729733 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" event={"ID":"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a","Type":"ContainerStarted","Data":"5d08c1d9939884959c278b7a2299c8add1971afc95f080fd5d56e3e41ce1c379"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.734728 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:43.734993 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.234981901 +0000 UTC m=+151.299485415 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.736174 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-txv66" podStartSLOduration=131.73616464 podStartE2EDuration="2m11.73616464s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:43.707034642 +0000 UTC m=+150.771538156" watchObservedRunningTime="2025-11-29 04:13:43.73616464 +0000 UTC m=+150.800668154" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.774568 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" event={"ID":"9ed87add-9fae-43f8-acf1-e8b425d9afee","Type":"ContainerStarted","Data":"42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.799414 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw"] Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.835909 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 04:13:43.838780 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.33876654 +0000 UTC m=+151.403270054 (durationBeforeRetry 500ms). 
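
The pod_startup_latency_tracker entries are plain arithmetic: podStartSLOduration is the watch-observed running time minus podCreationTimestamp, and the pulling timestamps are all zero here because the images were already present on the node. For machine-api-operator-5694c8668f-txv66 above, 04:13:43.73616464 minus 04:11:32 is 131.73616464s, the reported 2m11.736s. A short check of that subtraction using the timestamps copied from the entry:

package main

import (
    "fmt"
    "log"
    "time"
)

func main() {
    // Layout matching the timestamp format printed in the log.
    const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    created, err := time.Parse(layout, "2025-11-29 04:11:32 +0000 UTC")
    if err != nil {
        log.Fatal(err)
    }
    running, err := time.Parse(layout, "2025-11-29 04:13:43.73616464 +0000 UTC")
    if err != nil {
        log.Fatal(err)
    }
    // Prints 131.73616464, matching podStartSLOduration in the entry above.
    fmt.Printf("%.8f seconds\n", running.Sub(created).Seconds())
}
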
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.845767 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kwszg"] Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.846366 4631 generic.go:334] "Generic (PLEG): container finished" podID="41cf001b-6d25-483e-ad01-03a028926fff" containerID="dc33b189306b608dd7c8e03bae7bb2299ddaf2b5d85d5232b2b06a7b3f776293" exitCode=0 Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.846413 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" event={"ID":"41cf001b-6d25-483e-ad01-03a028926fff","Type":"ContainerDied","Data":"dc33b189306b608dd7c8e03bae7bb2299ddaf2b5d85d5232b2b06a7b3f776293"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.885676 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" event={"ID":"257f5a70-3421-4c15-acec-c898cb9c4fe6","Type":"ContainerStarted","Data":"579b7430400b8bd8aae2c7e677d4789b64bac1280a7510ee2862eee01a0e24dc"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.887015 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" event={"ID":"3be4bf0e-d446-4474-900c-0a5aec6450a5","Type":"ContainerStarted","Data":"a7a856818d6e5fc8fd98b352c8c4f86ea7aaeacb93078709f30718f2d47607a6"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.888406 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr" event={"ID":"97f2dec2-800d-4308-9df6-61f5f94f2393","Type":"ContainerStarted","Data":"ae3dd3adbcb6f13c1b473ff2e607b5be2b6750c0bf285e03216059be7182549a"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.889775 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-pbcfc" event={"ID":"e4d8dab1-39ad-4c93-a452-5ecf8afda237","Type":"ContainerStarted","Data":"bac57e473a623949fe511a538719c5ade11deab9bf8e6b6b0bc903772549e5b9"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.917430 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.924934 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2"] Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.938168 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:43 crc kubenswrapper[4631]: E1129 
04:13:43.940451 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.440436916 +0000 UTC m=+151.504940430 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:43 crc kubenswrapper[4631]: W1129 04:13:43.948615 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8649ca07_00c1_4783_ba8c_3ab66f168149.slice/crio-e5099e1d9195c325f0df803a76d5322e8f65401c8ff8fdc864dd20568efa0723 WatchSource:0}: Error finding container e5099e1d9195c325f0df803a76d5322e8f65401c8ff8fdc864dd20568efa0723: Status 404 returned error can't find the container with id e5099e1d9195c325f0df803a76d5322e8f65401c8ff8fdc864dd20568efa0723 Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.948798 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" event={"ID":"f1ca18ee-d4fc-4e20-9719-7647edaa6296","Type":"ContainerStarted","Data":"0d2e00cab7687e0eb30e37ed4faf4c99a851ace78ebbda06aa826f2abf86408a"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.975898 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"a8b32cb7e11b6396e3f6f2d41344a35057374acce48e791dd9bd459e52d34ad4"} Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.984616 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bf469"] Nov 29 04:13:43 crc kubenswrapper[4631]: I1129 04:13:43.984667 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8"] Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.026319 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rv7nt" event={"ID":"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab","Type":"ContainerStarted","Data":"cd4ff6a7e0e8056b67fbbb732cc31b6e3d866e258e0ed3413bd8626da11bfda4"} Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.042725 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.043610 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.54359579 +0000 UTC m=+151.608099304 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.046566 4631 generic.go:334] "Generic (PLEG): container finished" podID="386dd0c3-88ce-4690-978a-0ecd6f029d5c" containerID="a2270f4ceb7683849f118369169bd21105d6f78e55958d96689b1bdba4c89601" exitCode=0 Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.047025 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" event={"ID":"386dd0c3-88ce-4690-978a-0ecd6f029d5c","Type":"ContainerDied","Data":"a2270f4ceb7683849f118369169bd21105d6f78e55958d96689b1bdba4c89601"} Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.049118 4631 patch_prober.go:28] interesting pod/downloads-7954f5f757-r9xmz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.049167 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r9xmz" podUID="01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.069222 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf"] Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.078066 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zsmnf"] Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.158852 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.164621 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-ddddt" podStartSLOduration=133.164603764 podStartE2EDuration="2m13.164603764s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:44.123853239 +0000 UTC m=+151.188356753" watchObservedRunningTime="2025-11-29 04:13:44.164603764 +0000 UTC m=+151.229107278" Nov 29 04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.170796 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.670770886 +0000 UTC m=+151.735274400 (durationBeforeRetry 500ms). 
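
The "SyncLoop (PLEG)" lines are the pod lifecycle event generator relaying runtime state changes into the kubelet's sync loop; a "Generic (PLEG): container finished ... exitCode=0" followed by ContainerDied, as for apiserver-76f77b778f-7lch5 here, is a clean exit, consistent with an init container completing before the pod's main container starts a few seconds later. A reduced model of the event payload these entries print ({"ID":...,"Type":...,"Data":...}); the field meanings are inferred from the log, not taken from kubelet source:

package main

import "fmt"

// PodLifecycleEventType mirrors the Type strings visible in the log.
type PodLifecycleEventType string

const (
    ContainerStarted PodLifecycleEventType = "ContainerStarted"
    ContainerDied    PodLifecycleEventType = "ContainerDied"
)

// PodLifecycleEvent is a reduced model of the payload logged as
// event={"ID":...,"Type":...,"Data":...}; Data carries the container ID.
type PodLifecycleEvent struct {
    ID   string // pod UID
    Type PodLifecycleEventType
    Data string // container ID
}

func main() {
    // Values copied from the apiserver-76f77b778f-7lch5 entries above.
    ev := PodLifecycleEvent{
        ID:   "386dd0c3-88ce-4690-978a-0ecd6f029d5c",
        Type: ContainerDied,
        Data: "a2270f4ceb7683849f118369169bd21105d6f78e55958d96689b1bdba4c89601",
    }
    fmt.Printf("SyncLoop (PLEG): event for pod %s: %s %s\n", ev.ID, ev.Type, ev.Data)
}
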
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.265132 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.265402 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.765386279 +0000 UTC m=+151.829889793 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.310661 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 04:13:44 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld Nov 29 04:13:44 crc kubenswrapper[4631]: [+]process-running ok Nov 29 04:13:44 crc kubenswrapper[4631]: healthz check failed Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.310952 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.311339 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm"] Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.317893 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-59sqc"] Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.324117 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm"] Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.367933 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 
04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.368246 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.868235235 +0000 UTC m=+151.932738739 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.415402 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q"] Nov 29 04:13:44 crc kubenswrapper[4631]: W1129 04:13:44.444321 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4977eaff_2a0a_4dac_8d85_bc2a207254cb.slice/crio-2debd407b46d4e0bf1f7076cdee714df456878ae63385f05c7c5185fc0310bc3 WatchSource:0}: Error finding container 2debd407b46d4e0bf1f7076cdee714df456878ae63385f05c7c5185fc0310bc3: Status 404 returned error can't find the container with id 2debd407b46d4e0bf1f7076cdee714df456878ae63385f05c7c5185fc0310bc3 Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.469858 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.470081 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:44.970068776 +0000 UTC m=+152.034572280 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.473624 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-lsnw8"] Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.571465 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.571745 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.071733923 +0000 UTC m=+152.136237437 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.674628 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.674892 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.174872266 +0000 UTC m=+152.239375780 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.777816 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.778928 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.278914501 +0000 UTC m=+152.343418015 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.905832 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.405816531 +0000 UTC m=+152.470320045 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.905854 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:44 crc kubenswrapper[4631]: I1129 04:13:44.906035 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:44 crc kubenswrapper[4631]: E1129 04:13:44.906284 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.406278182 +0000 UTC m=+152.470781696 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.006851 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.007103 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.507088918 +0000 UTC m=+152.571592432 (durationBeforeRetry 500ms). 
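
The strict alternation through this whole passage, UnmountVolume for the departed pod 8f668bae-... followed by MountVolume for the replacement image-registry pod 171d32d8-..., is the volume manager's reconciler comparing desired state against actual state on a short fixed period, with both directions blocked on the same unregistered driver. A bare-bones sketch of that desired-versus-actual loop; the types, period, and driverRegistered stand-in are all illustrative, not the kubelet's real reconciler:

package main

import (
    "fmt"
    "time"
)

// world is a toy desired/actual volume state for one PVC.
type world struct {
    desiredPod string // pod that should have the volume mounted
    actualPod  string // pod that currently has it mounted
}

// Stand-in for the CSI registry lookup that fails throughout this log.
func driverRegistered() bool { return false }

func reconcile(w *world) {
    // Unmount for pods that no longer want the volume ...
    if w.actualPod != "" && w.actualPod != w.desiredPod {
        if driverRegistered() {
            w.actualPod = ""
        } else {
            fmt.Println("UnmountVolume.TearDown failed: driver not registered; retry in 500ms")
        }
    }
    // ... and mount for the pod that does.
    if w.desiredPod != "" && w.actualPod != w.desiredPod {
        if driverRegistered() {
            w.actualPod = w.desiredPod
        } else {
            fmt.Println("MountVolume.MountDevice failed: driver not registered; retry in 500ms")
        }
    }
}

func main() {
    w := &world{desiredPod: "image-registry-697d97f7c8-8h4ns", actualPod: "old-pod"}
    for i := 0; i < 3; i++ { // the real reconciler runs on a short fixed period
        reconcile(w)
        time.Sleep(100 * time.Millisecond)
    }
}
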
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.079199 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" event={"ID":"73c987cc-2f81-4c56-a41d-1f8c9d601fa9","Type":"ContainerStarted","Data":"8858d38de7658ae51470d6f2e6674ef99d03c06dee3e80133fac8f27a08d8848"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.107877 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.108319 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.608307804 +0000 UTC m=+152.672811318 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.117065 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" event={"ID":"4e7b7b24-4b23-4ae5-842f-73e826f944d2","Type":"ContainerStarted","Data":"f8233f58b7dd049fe3c1d78651c8e8189c782b0f40c1ce278bf27ca1d783ca83"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.123148 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" event={"ID":"0619cd40-96ab-4a00-b716-d7538b375a81","Type":"ContainerStarted","Data":"51997d12556d34efe89e21c714c267bf0070d7bc8a361e79e68484d55acf11f7"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.126202 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" event={"ID":"108bb116-2d8b-4e5c-90fa-fa22239d68ac","Type":"ContainerStarted","Data":"d7850c25d420f93a0350290d816861dffda36bf06c491bfa0d12b5c21cc9acab"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.132077 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" event={"ID":"386dd0c3-88ce-4690-978a-0ecd6f029d5c","Type":"ContainerStarted","Data":"3f84aa5d2df887109a643d57de6f2408db664c55112e086996d2ca82b45b9ffd"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.138885 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" event={"ID":"9255468d-b26e-412e-84fd-ad5a94279720","Type":"ContainerStarted","Data":"abb9ed133783b6c50d10907bd9ec92bc64a8662f3bf3b003573bd2931abb769c"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.144123 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" event={"ID":"4977eaff-2a0a-4dac-8d85-bc2a207254cb","Type":"ContainerStarted","Data":"2debd407b46d4e0bf1f7076cdee714df456878ae63385f05c7c5185fc0310bc3"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.178842 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" event={"ID":"2794d648-d3ac-4540-85be-c80a1997ff1c","Type":"ContainerStarted","Data":"4181c4c6f5a43b2235b45164bd9b559d61bc85cd8c04478c619e572796486467"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.179417 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.180263 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" podStartSLOduration=133.180244577 podStartE2EDuration="2m13.180244577s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:45.1787255 +0000 UTC m=+152.243229014" watchObservedRunningTime="2025-11-29 04:13:45.180244577 +0000 UTC m=+152.244748091" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.185970 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" event={"ID":"d9c7d795-834c-498b-a012-b4d48c277f9c","Type":"ContainerStarted","Data":"07f4240f996bbb9201eaa4e470f14d16451faee4a175f228e2fcaae6bd87d45a"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.189963 4631 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-vqqsc container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.190062 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" podUID="2794d648-d3ac-4540-85be-c80a1997ff1c" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.191416 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" event={"ID":"8649ca07-00c1-4783-ba8c-3ab66f168149","Type":"ContainerStarted","Data":"e5099e1d9195c325f0df803a76d5322e8f65401c8ff8fdc864dd20568efa0723"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.203700 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" event={"ID":"8361abea-c6bc-4927-a88b-c8318096d60d","Type":"ContainerStarted","Data":"b0f08f84b81d231d45cf45c34f662a7117941f4e2022af143db62e2fa4969e01"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.206880 4631 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" event={"ID":"257f5a70-3421-4c15-acec-c898cb9c4fe6","Type":"ContainerStarted","Data":"ddd37f15441484f622bbe55c7d9a3bf5cd605a52bfd48dfb99ee4eb6144644d3"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.213840 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.214755 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.714741318 +0000 UTC m=+152.779244832 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.252222 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 04:13:45 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld Nov 29 04:13:45 crc kubenswrapper[4631]: [+]process-running ok Nov 29 04:13:45 crc kubenswrapper[4631]: healthz check failed Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.252274 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.264038 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-lsnw8" event={"ID":"139e3088-813e-4ac6-8145-41aaf955cce6","Type":"ContainerStarted","Data":"53e73c5d73a18447dd37b9a5e5a168464eaf5b52e91559dd59368eba64c71993"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.270119 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"b8e643aca6fed0d6efe8ae99baf98dc08f364cbf9b9744529ea26e4f3353372f"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.270667 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.282793 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-59sqc" event={"ID":"c87c11b9-cb13-4548-b9f3-f40b9962b739","Type":"ContainerStarted","Data":"da4d115daf01076a018efa4d0f2b7c3a85c68761fac11a1f9cba69f31974b18f"} Nov 29 04:13:45 crc kubenswrapper[4631]: 
I1129 04:13:45.290345 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9cjqm" podStartSLOduration=134.290315332 podStartE2EDuration="2m14.290315332s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:45.289718297 +0000 UTC m=+152.354221811" watchObservedRunningTime="2025-11-29 04:13:45.290315332 +0000 UTC m=+152.354818846" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.319052 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.320135 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.820121227 +0000 UTC m=+152.884624741 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.325626 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"163f2a0859db2f1eed0c01d89d131bca258e44ea6b5611f6d2923a2b9820540c"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.351892 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" podStartSLOduration=133.35187716 podStartE2EDuration="2m13.35187716s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:45.351712896 +0000 UTC m=+152.416216410" watchObservedRunningTime="2025-11-29 04:13:45.35187716 +0000 UTC m=+152.416380674" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.368676 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" event={"ID":"679c6cbe-89a8-458a-a798-5f70ef026702","Type":"ContainerStarted","Data":"761de9187e99fcd350e9e376c51702fe416f6d4569c88deaf0119574887298c9"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.370984 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" event={"ID":"96b28de4-d863-4d8a-86fc-f55986dde2cc","Type":"ContainerStarted","Data":"926565817cec5cdee4e7e91f392527aa514527a2c004485393135c43382a1d3f"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.383743 4631 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" event={"ID":"ca1d8fec-8229-4904-9fc3-178914884ea0","Type":"ContainerStarted","Data":"965f2a01ffc0c0c480332237cebda4a0e35e83b9f7582db5e319b187f45262c3"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.390032 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" event={"ID":"c2efa169-6700-4901-a36d-cdba29de5269","Type":"ContainerStarted","Data":"e86d3ad45d23d8b203934455d37e3d2bdda6986534516475b005b4f9aa353be0"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.393880 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" event={"ID":"dba614c7-ceae-4ce5-afb6-6d082156f640","Type":"ContainerStarted","Data":"452c7232c4562035acea8c7c690fb02492e6e73fe54f38a746cc6b2136e24a85"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.402201 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" event={"ID":"8e76293e-e7ed-4b04-8941-d41fe405c987","Type":"ContainerStarted","Data":"827aa015326a26ba196c1da1998328454657559f4b5629cb3cc943bac85396d4"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.404590 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" event={"ID":"78552157-0b5f-437a-988a-71805a812ab2","Type":"ContainerStarted","Data":"fc8f497743f2da67fe0c30654c68373427c57ebce040f07330c4e466e9ad2d5a"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.405834 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rv7nt" event={"ID":"a6bebfbe-c42b-44c0-b2d2-4476b7edf2ab","Type":"ContainerStarted","Data":"a14152d171a22908d00fe189e4ba0aade291839cad55d12ad739e01207007e61"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.407304 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" event={"ID":"59dfd767-fdf5-4034-85db-6ae66566ee59","Type":"ContainerStarted","Data":"a1b98778ceef9ec80d7da24c64fa281e43ef9f1897322cbe225e9a399c15a3d5"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.418601 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4l8kb" event={"ID":"98724a81-608d-48f1-b876-e4354b2ff65d","Type":"ContainerStarted","Data":"698879f48b5a64685d35c4e5768b64bb3280691ad49b07bb7f848c8644345c2c"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.425547 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.426781 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:45.926729005 +0000 UTC m=+152.991232519 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.485448 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr" event={"ID":"97f2dec2-800d-4308-9df6-61f5f94f2393","Type":"ContainerStarted","Data":"67b2832d2905b0e93b16bad07154925fc4a220d06fdd102507be7440ede09d5c"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.503504 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-x8t8v" podStartSLOduration=133.503488018 podStartE2EDuration="2m13.503488018s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:45.502697609 +0000 UTC m=+152.567201123" watchObservedRunningTime="2025-11-29 04:13:45.503488018 +0000 UTC m=+152.567991532" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.506374 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" event={"ID":"48e794c9-0aa9-42e3-94d2-27e97f141d7c","Type":"ContainerStarted","Data":"772097b9ee45c05aca39587f74cd6fe71633c125a1ed6e7402325a7c04e7a441"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.529371 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.530393 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.030374931 +0000 UTC m=+153.094878445 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.564817 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"f9c311b4db827151d6fcf3ca4688bc4a840e101a5944dfafaac2384e20d32878"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.566565 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.566612 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.579429 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" event={"ID":"bc555ddf-f4df-4328-8396-c2ddaeb49ea0","Type":"ContainerStarted","Data":"0dbc1eeb6ff797b8c4a62396f4728fa7ea4a4f836542b77faf9105bd36934cf5"} Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.581206 4631 patch_prober.go:28] interesting pod/downloads-7954f5f757-r9xmz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.581231 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r9xmz" podUID="01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.633445 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.634739 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.134724534 +0000 UTC m=+153.199228048 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.654492 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4t9l5" podStartSLOduration=134.654475711 podStartE2EDuration="2m14.654475711s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:45.578576359 +0000 UTC m=+152.643079873" watchObservedRunningTime="2025-11-29 04:13:45.654475711 +0000 UTC m=+152.718979215" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.730384 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-rv7nt" podStartSLOduration=7.730361902 podStartE2EDuration="7.730361902s" podCreationTimestamp="2025-11-29 04:13:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:45.656729757 +0000 UTC m=+152.721233271" watchObservedRunningTime="2025-11-29 04:13:45.730361902 +0000 UTC m=+152.794865426" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.736011 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.736420 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.236404261 +0000 UTC m=+153.300907775 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.787265 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-x5s4x" podStartSLOduration=133.787246985 podStartE2EDuration="2m13.787246985s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:45.78582396 +0000 UTC m=+152.850327464" watchObservedRunningTime="2025-11-29 04:13:45.787246985 +0000 UTC m=+152.851750499" Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.846991 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.847373 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.347358677 +0000 UTC m=+153.411862191 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:45 crc kubenswrapper[4631]: I1129 04:13:45.950142 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:45 crc kubenswrapper[4631]: E1129 04:13:45.950457 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.450445789 +0000 UTC m=+153.514949303 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.051571 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.051733 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.551708446 +0000 UTC m=+153.616211960 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.051793 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.052151 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.552143747 +0000 UTC m=+153.616647261 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.153307 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.153923 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.653908266 +0000 UTC m=+153.718411780 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.256342 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.256602 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.756591478 +0000 UTC m=+153.821094992 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.258641 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 04:13:46 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld Nov 29 04:13:46 crc kubenswrapper[4631]: [+]process-running ok Nov 29 04:13:46 crc kubenswrapper[4631]: healthz check failed Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.258685 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.358829 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.359391 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.859376213 +0000 UTC m=+153.923879727 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.460174 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.460558 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:46.960544397 +0000 UTC m=+154.025047911 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.561030 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.561421 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:47.061406894 +0000 UTC m=+154.125910408 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.593825 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" event={"ID":"d9c7d795-834c-498b-a012-b4d48c277f9c","Type":"ContainerStarted","Data":"5040ac173604e1c477b45a9a5370f9cbce23cd877175e53a3c8993f6a8e1d3f0"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.594770 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" event={"ID":"96b28de4-d863-4d8a-86fc-f55986dde2cc","Type":"ContainerStarted","Data":"0480ffb582719aef83d3c7998a70bdb2f43b5ec1bc612865c58c077468272fa5"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.596615 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" event={"ID":"8649ca07-00c1-4783-ba8c-3ab66f168149","Type":"ContainerStarted","Data":"86de49c895ff327d7d17c4b731756d99099f492f6eabf54b9b5353585c7fc4f7"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.602435 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" event={"ID":"78552157-0b5f-437a-988a-71805a812ab2","Type":"ContainerStarted","Data":"69dc560d11ca0b45c0f2bcbea2b6c8f2a41940a43fb4bd5cfbc5ddea9af4bb14"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.608192 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" event={"ID":"bc555ddf-f4df-4328-8396-c2ddaeb49ea0","Type":"ContainerStarted","Data":"19a9e77ef06f72874dd1766aa5f72eca356239663837c167a90df71719410e20"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.616622 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" event={"ID":"59dfd767-fdf5-4034-85db-6ae66566ee59","Type":"ContainerStarted","Data":"28531ba8cec29b43d4b7da4237f3f24279709f0ba62d0ec5c1ca0610833e54b7"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.617374 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.618578 4631 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-2jd8q container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused" start-of-body= Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.618622 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" podUID="59dfd767-fdf5-4034-85db-6ae66566ee59" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.635797 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" event={"ID":"73c987cc-2f81-4c56-a41d-1f8c9d601fa9","Type":"ContainerStarted","Data":"c1c359fb7ae7f79443663263c4c635dd5e9fed825e1fa633cdf5f81cf7cd6be0"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.643429 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" event={"ID":"41cf001b-6d25-483e-ad01-03a028926fff","Type":"ContainerStarted","Data":"2838b5f4c4b375a81319c62f77edae910a5bdddec0899c053d25369e87508ae7"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.643466 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.663054 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.665288 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" event={"ID":"386dd0c3-88ce-4690-978a-0ecd6f029d5c","Type":"ContainerStarted","Data":"e451dcddb847ccb5bd1066c05223e7de1d9f859d3f659334988c54427a390a89"} Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.666011 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:47.165999713 +0000 UTC m=+154.230503227 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.704417 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.714811 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-59sqc" event={"ID":"c87c11b9-cb13-4548-b9f3-f40b9962b739","Type":"ContainerStarted","Data":"17e9a9e7dbc04d39c11988da9051d96785f2a8285e9d03105161c8c583c8413f"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.715813 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" podStartSLOduration=134.715797971 podStartE2EDuration="2m14.715797971s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:46.714838767 +0000 UTC m=+153.779342281" watchObservedRunningTime="2025-11-29 04:13:46.715797971 +0000 UTC m=+153.780301485" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.717522 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-tfz6d" podStartSLOduration=135.717516004 podStartE2EDuration="2m15.717516004s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:46.652161932 +0000 UTC m=+153.716665446" watchObservedRunningTime="2025-11-29 04:13:46.717516004 +0000 UTC m=+153.782019518" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.734121 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" event={"ID":"3be4bf0e-d446-4474-900c-0a5aec6450a5","Type":"ContainerStarted","Data":"2a1d1848267e5985561fcaea59ea88d6bb9d5da1f195439b4ccf2c056d2e3194"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.747577 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" event={"ID":"f1ca18ee-d4fc-4e20-9719-7647edaa6296","Type":"ContainerStarted","Data":"d0832733c8696d5332927d0c557e8a74104521c70e866af0ec38e40c25f3ff90"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.763212 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" podStartSLOduration=135.76319417 podStartE2EDuration="2m15.76319417s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:46.762250087 +0000 UTC m=+153.826753601" watchObservedRunningTime="2025-11-29 04:13:46.76319417 +0000 UTC m=+153.827697684" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.765725 4631 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.766823 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:47.266810689 +0000 UTC m=+154.331314203 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.770880 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" event={"ID":"c2efa169-6700-4901-a36d-cdba29de5269","Type":"ContainerStarted","Data":"50375f2675a9eca8e69b6380bf72b0d21babd2d1ad080a3826af9d119772bd8b"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.789968 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" event={"ID":"ca1d8fec-8229-4904-9fc3-178914884ea0","Type":"ContainerStarted","Data":"73cd6ef3062173a74499e7f0b4130d364b747c27e4593343488b3e3af94e20b0"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.804548 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" event={"ID":"dba614c7-ceae-4ce5-afb6-6d082156f640","Type":"ContainerStarted","Data":"0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.805361 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.812603 4631 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-kwszg container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.812690 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.820189 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr" podStartSLOduration=134.820172685 podStartE2EDuration="2m14.820172685s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:46.819498158 +0000 UTC m=+153.884001672" watchObservedRunningTime="2025-11-29 04:13:46.820172685 +0000 UTC m=+153.884676199" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.822004 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4l8kb" event={"ID":"98724a81-608d-48f1-b876-e4354b2ff65d","Type":"ContainerStarted","Data":"feaa545bff2d475e0ee7364ea676ca7d7c42f660e82abd4107cb6e84385df58e"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.822572 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.824474 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" event={"ID":"8e76293e-e7ed-4b04-8941-d41fe405c987","Type":"ContainerStarted","Data":"75dc49a149a281f6d59e07f444ded58887f540032c9ad6a86bdf938dceda8278"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.825317 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.825613 4631 patch_prober.go:28] interesting pod/console-operator-58897d9998-4l8kb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.825651 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4l8kb" podUID="98724a81-608d-48f1-b876-e4354b2ff65d" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.832126 4631 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-l66gm container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.832161 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" podUID="8e76293e-e7ed-4b04-8941-d41fe405c987" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.840134 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-lp9sb" podStartSLOduration=134.840119267 podStartE2EDuration="2m14.840119267s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:46.839709587 +0000 UTC m=+153.904213101" watchObservedRunningTime="2025-11-29 04:13:46.840119267 +0000 UTC m=+153.904622781" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.840787 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" event={"ID":"108bb116-2d8b-4e5c-90fa-fa22239d68ac","Type":"ContainerStarted","Data":"14dbbb472bad63d06d531bd5faa2afc534ab9867d4b41791de231e0ae90b4a4e"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.861645 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" event={"ID":"7c82f5bd-7a32-42f8-9ff0-1d1aae08d66a","Type":"ContainerStarted","Data":"65836ab6502eef2b2b43a6eedcd37dbd3c1377d8b3b73e219ce789234a21c3a9"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.867231 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.876799 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:47.376784001 +0000 UTC m=+154.441287515 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.892474 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" event={"ID":"679c6cbe-89a8-458a-a798-5f70ef026702","Type":"ContainerStarted","Data":"38279f1bf9cabf9306299744cfea82ecf7786bf4fbb8c62424d627e22a1fc68e"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.892510 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" event={"ID":"679c6cbe-89a8-458a-a798-5f70ef026702","Type":"ContainerStarted","Data":"0603d687b6efaf023280d7f991b0440d5e6decf2e65cc24f911d6ff61a7fd24e"} Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.904897 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" podStartSLOduration=135.904882824 podStartE2EDuration="2m15.904882824s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:46.903723675 +0000 UTC m=+153.968227189" watchObservedRunningTime="2025-11-29 04:13:46.904882824 +0000 UTC m=+153.969386338" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.915599 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vqqsc" Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.919024 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bh74" Nov 29 04:13:46 crc 
Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.974266 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" podStartSLOduration=135.974252314 podStartE2EDuration="2m15.974252314s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:46.972311886 +0000 UTC m=+154.036815410" watchObservedRunningTime="2025-11-29 04:13:46.974252314 +0000 UTC m=+154.038755828"
Nov 29 04:13:46 crc kubenswrapper[4631]: I1129 04:13:46.984790 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 29 04:13:46 crc kubenswrapper[4631]: E1129 04:13:46.985825 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:47.485792239 +0000 UTC m=+154.550295753 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
[The MountVolume.MountDevice retry for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 (pod image-registry-697d97f7c8-8h4ns) and the UnmountVolume.TearDown retry for the same volume (pod UID 8f668bae-612b-4b75-9490-919e737c6a3b) repeat every ~500ms with this identical "driver name kubevirt.io.hostpath-provisioner not found" error until the driver registers at 04:13:49; the duplicate retry records are elided below and only the interleaved unique records are kept.]
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.051663 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-xwkb8" podStartSLOduration=135.051644692 podStartE2EDuration="2m15.051644692s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.019627313 +0000 UTC m=+154.084130827" watchObservedRunningTime="2025-11-29 04:13:47.051644692 +0000 UTC m=+154.116148206"
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.053297 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qrnbr" podStartSLOduration=135.053292283 podStartE2EDuration="2m15.053292283s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.052177626 +0000 UTC m=+154.116681140" watchObservedRunningTime="2025-11-29 04:13:47.053292283 +0000 UTC m=+154.117795797"
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.133444 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-hckm8" podStartSLOduration=136.133426889 podStartE2EDuration="2m16.133426889s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.097142994 +0000 UTC m=+154.161646498" watchObservedRunningTime="2025-11-29 04:13:47.133426889 +0000 UTC m=+154.197930403"
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.165301 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-4l8kb" podStartSLOduration=136.165286995 podStartE2EDuration="2m16.165286995s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.134404393 +0000 UTC m=+154.198907907" watchObservedRunningTime="2025-11-29 04:13:47.165286995 +0000 UTC m=+154.229790509"
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.165895 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" podStartSLOduration=135.16589118 podStartE2EDuration="2m15.16589118s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.162997908 +0000 UTC m=+154.227501412" watchObservedRunningTime="2025-11-29 04:13:47.16589118 +0000 UTC m=+154.230394694"
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.186092 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" podStartSLOduration=135.186075027 podStartE2EDuration="2m15.186075027s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.185894293 +0000 UTC m=+154.250397807" watchObservedRunningTime="2025-11-29 04:13:47.186075027 +0000 UTC m=+154.250578541"
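
[Each failed volume operation above is parked by kubelet's nested pending operations table with a 500ms durationBeforeRetry ("No retries permitted until ..."); the volume reconciler keeps re-issuing the operation on every sync pass, and the attempt is rejected until the gate expires. A simplified, illustrative retry gate in that spirit; the names are invented and this is not kubelet's actual implementation:]

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // pendingOp remembers when an operation last failed and how long to wait
    // before permitting a retry ("durationBeforeRetry" in the log records).
    type pendingOp struct {
        lastErr time.Time
        backoff time.Duration
    }

    func (p *pendingOp) tryRun(run func() error) error {
        if time.Since(p.lastErr) < p.backoff {
            return fmt.Errorf("no retries permitted until %s",
                p.lastErr.Add(p.backoff).Format(time.RFC3339Nano))
        }
        if err := run(); err != nil {
            p.lastErr = time.Now() // restart the gate on failure
            return err
        }
        return nil
    }

    func main() {
        op := &pendingOp{backoff: 500 * time.Millisecond}
        mount := func() error {
            return errors.New("driver name kubevirt.io.hostpath-provisioner " +
                "not found in the list of registered CSI drivers")
        }
        // First call fails; the next two are rejected by the 500ms gate.
        for i := 0; i < 3; i++ {
            fmt.Println(op.tryRun(mount))
            time.Sleep(200 * time.Millisecond)
        }
    }
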
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:47 crc kubenswrapper[4631]: E1129 04:13:47.202228 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:47.702210525 +0000 UTC m=+154.766714039 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.243790 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-m24c2" podStartSLOduration=135.24377749 podStartE2EDuration="2m15.24377749s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.215965864 +0000 UTC m=+154.280469378" watchObservedRunningTime="2025-11-29 04:13:47.24377749 +0000 UTC m=+154.308281004" Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.245000 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-59sqc" podStartSLOduration=9.24499363 podStartE2EDuration="9.24499363s" podCreationTimestamp="2025-11-29 04:13:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.242720084 +0000 UTC m=+154.307223598" watchObservedRunningTime="2025-11-29 04:13:47.24499363 +0000 UTC m=+154.309497144" Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.260079 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 04:13:47 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld Nov 29 04:13:47 crc kubenswrapper[4631]: [+]process-running ok Nov 29 04:13:47 crc kubenswrapper[4631]: healthz check failed Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.260363 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.305030 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:47 crc kubenswrapper[4631]: E1129 04:13:47.305364 4631 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:47.805353239 +0000 UTC m=+154.869856753 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.307399 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-bnmtn" podStartSLOduration=135.307386459 podStartE2EDuration="2m15.307386459s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.305751928 +0000 UTC m=+154.370255442" watchObservedRunningTime="2025-11-29 04:13:47.307386459 +0000 UTC m=+154.371889973" Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.333472 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" podStartSLOduration=135.333458022 podStartE2EDuration="2m15.333458022s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.333171244 +0000 UTC m=+154.397674758" watchObservedRunningTime="2025-11-29 04:13:47.333458022 +0000 UTC m=+154.397961536" Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.406444 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:47 crc kubenswrapper[4631]: E1129 04:13:47.406741 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:47.906724728 +0000 UTC m=+154.971228242 (durationBeforeRetry 500ms). 
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.910898 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-xsjfr" event={"ID":"97f2dec2-800d-4308-9df6-61f5f94f2393","Type":"ContainerStarted","Data":"6980e6544d177b4bf6461e44e0728e247b83b159296a3932c8b536ce38d57556"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.913470 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" event={"ID":"73c987cc-2f81-4c56-a41d-1f8c9d601fa9","Type":"ContainerStarted","Data":"29a685cec473adb41f5d5187c6b8934427d1bbe40f46a667862e6b39773b91da"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.915706 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" event={"ID":"8649ca07-00c1-4783-ba8c-3ab66f168149","Type":"ContainerStarted","Data":"e3ec8166a442c09bd27f3c8f4740befd76d981eaf1cc35bfb79b11ca373d30c5"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.927110 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" event={"ID":"9255468d-b26e-412e-84fd-ad5a94279720","Type":"ContainerStarted","Data":"ec944d1eec794873ef7e5521f16f9098c4e3142d13abf3a7cba0af27937dcf76"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.927159 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" event={"ID":"9255468d-b26e-412e-84fd-ad5a94279720","Type":"ContainerStarted","Data":"bd1c779e81fe7c2dfd590ba44b8fd655fbc00bb46322f8a96e4f32a7606e2b80"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.930466 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" event={"ID":"f1ca18ee-d4fc-4e20-9719-7647edaa6296","Type":"ContainerStarted","Data":"3820c1a19a48895e2997ec07aa7864382324d25658103ea7530f45211f610813"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.934402 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" event={"ID":"4977eaff-2a0a-4dac-8d85-bc2a207254cb","Type":"ContainerStarted","Data":"f577fb65b9d1023d932caa2811f312e0ca8e13e17dfa1d4edcf9afa0c0d04dd5"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.938942 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" event={"ID":"d9c7d795-834c-498b-a012-b4d48c277f9c","Type":"ContainerStarted","Data":"79dfe426e61df90dd87fd5c9c363718693b5bf6ea3e75c182b3c4e19ee507380"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.943600 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bf469" podStartSLOduration=135.943589175 podStartE2EDuration="2m15.943589175s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:47.941930194 +0000 UTC m=+155.006433708" watchObservedRunningTime="2025-11-29 04:13:47.943589175 +0000 UTC m=+155.008092689"
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.948324 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-lsnw8" event={"ID":"139e3088-813e-4ac6-8145-41aaf955cce6","Type":"ContainerStarted","Data":"9d3842905395760fc15d0f72467118fa152113dbf75de13bca02c94b82bda8da"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.948402 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-lsnw8" event={"ID":"139e3088-813e-4ac6-8145-41aaf955cce6","Type":"ContainerStarted","Data":"7d25baecc9eda2d7031bb664bd9a4126e6484ac0f24e94f5f34255db9fcbb304"}
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.948416 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-lsnw8"
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.950162 4631 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-kwszg container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.950208 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused"
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.950934 4631 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-2jd8q container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused" start-of-body=
Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.950966 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" podUID="59dfd767-fdf5-4034-85db-6ae66566ee59" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.42:5443/healthz\": dial tcp 10.217.0.42:5443: connect: connection refused"
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:13:47 crc kubenswrapper[4631]: I1129 04:13:47.959204 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l66gm" Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.013859 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:48 crc kubenswrapper[4631]: E1129 04:13:48.014006 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:48.513981291 +0000 UTC m=+155.578484805 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.014470 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:48 crc kubenswrapper[4631]: E1129 04:13:48.014789 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:48.514777721 +0000 UTC m=+155.579281235 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8h4ns" (UID: "171d32d8-1dcb-497d-9724-d798414c5602") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.056702 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-fqvnm" podStartSLOduration=136.056680224 podStartE2EDuration="2m16.056680224s" podCreationTimestamp="2025-11-29 04:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:48.049064506 +0000 UTC m=+155.113568020" watchObservedRunningTime="2025-11-29 04:13:48.056680224 +0000 UTC m=+155.121183738" Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.115707 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:48 crc kubenswrapper[4631]: E1129 04:13:48.117228 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-29 04:13:48.617212766 +0000 UTC m=+155.681716270 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.136883 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x67rw" podStartSLOduration=137.136863731 podStartE2EDuration="2m17.136863731s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:48.13560744 +0000 UTC m=+155.200110954" watchObservedRunningTime="2025-11-29 04:13:48.136863731 +0000 UTC m=+155.201367235" Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.219093 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:48 crc kubenswrapper[4631]: E1129 04:13:48.219387 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-29 04:13:48.719375685 +0000 UTC m=+155.783879199 (durationBeforeRetry 500ms). 
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.259629 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 29 04:13:48 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld
Nov 29 04:13:48 crc kubenswrapper[4631]: [+]process-running ok
Nov 29 04:13:48 crc kubenswrapper[4631]: healthz check failed
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.259682 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.270719 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-crmsz" podStartSLOduration=137.270702531 podStartE2EDuration="2m17.270702531s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:48.212018304 +0000 UTC m=+155.276521818" watchObservedRunningTime="2025-11-29 04:13:48.270702531 +0000 UTC m=+155.335206045"
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.332797 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xvxpc" podStartSLOduration=137.332781252 podStartE2EDuration="2m17.332781252s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:48.273323546 +0000 UTC m=+155.337827060" watchObservedRunningTime="2025-11-29 04:13:48.332781252 +0000 UTC m=+155.397284766"
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.407706 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-lsnw8" podStartSLOduration=10.407689669 podStartE2EDuration="10.407689669s" podCreationTimestamp="2025-11-29 04:13:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:48.404681915 +0000 UTC m=+155.469185429" watchObservedRunningTime="2025-11-29 04:13:48.407689669 +0000 UTC m=+155.472193183"
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.951390 4631 patch_prober.go:28] interesting pod/console-operator-58897d9998-4l8kb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.951442 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4l8kb" podUID="98724a81-608d-48f1-b876-e4354b2ff65d" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.955047 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" event={"ID":"4977eaff-2a0a-4dac-8d85-bc2a207254cb","Type":"ContainerStarted","Data":"2fd7e4ab5e1873a4bcba1d21e3de47c5a4b710d00f40e242a2746483b124eafe"}
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.955732 4631 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-kwszg container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Nov 29 04:13:48 crc kubenswrapper[4631]: I1129 04:13:48.955776 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused"
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.120746 4631 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
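
[The plugin_watcher record above is the turning point for the stuck volume operations: the hostpath provisioner has finally created its registration socket under /var/lib/kubelet/plugins_registry, which kubelet watches for new sockets and then follows with a gRPC registration handshake (GetInfo / NotifyRegistrationStatus) before the driver appears in the registered-CSI-drivers list. A toy Go sketch of the discovery half only; the watch and handshake are omitted, and this is illustrative rather than kubelet's implementation:]

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        // Directory taken from the plugin_watcher record above.
        dir := "/var/lib/kubelet/plugins_registry"
        entries, err := os.ReadDir(dir)
        if err != nil {
            fmt.Println(err)
            return
        }
        for _, e := range entries {
            if strings.HasSuffix(e.Name(), ".sock") {
                // e.g. kubevirt.io.hostpath-provisioner-reg.sock
                fmt.Println("candidate plugin socket:", filepath.Join(dir, e.Name()))
            }
        }
    }
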
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.252011 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 29 04:13:49 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld
Nov 29 04:13:49 crc kubenswrapper[4631]: [+]process-running ok
Nov 29 04:13:49 crc kubenswrapper[4631]: healthz check failed
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.252066 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.365467 4631 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-29T04:13:49.120771202Z","Handler":null,"Name":""}
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.379926 4631 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.379997 4631 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.437717 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns"
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.446470 4631 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.446511 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns"
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.767467 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8h4ns\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns"
Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.832746 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dnjrt"]
Need to start a new one" pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.838103 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.842891 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.845000 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-mljqh" Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.857901 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.908899 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.923452 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dnjrt"] Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.944304 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4qrc\" (UniqueName: \"kubernetes.io/projected/b7485422-4238-4138-9b71-866a1315b330-kube-api-access-l4qrc\") pod \"certified-operators-dnjrt\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.944788 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-utilities\") pod \"certified-operators-dnjrt\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.944878 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-catalog-content\") pod \"certified-operators-dnjrt\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.955376 4631 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-2jd8q container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.955439 4631 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" podUID="59dfd767-fdf5-4034-85db-6ae66566ee59" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.42:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 29 04:13:49 crc kubenswrapper[4631]: I1129 04:13:49.972222 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" event={"ID":"4977eaff-2a0a-4dac-8d85-bc2a207254cb","Type":"ContainerStarted","Data":"531709817ef1d9d35b87d6fa4df8d8d339c893f88adaf61d041673bd3e64e331"} Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.017507 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hplsb"] Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.019149 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.021447 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.047046 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-utilities\") pod \"certified-operators-dnjrt\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.047092 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-utilities\") pod \"certified-operators-dnjrt\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.047124 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-catalog-content\") pod \"certified-operators-dnjrt\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.047171 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4qrc\" (UniqueName: \"kubernetes.io/projected/b7485422-4238-4138-9b71-866a1315b330-kube-api-access-l4qrc\") pod \"certified-operators-dnjrt\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.047495 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-catalog-content\") pod \"certified-operators-dnjrt\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.056816 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hplsb"] Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.100001 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4qrc\" (UniqueName: 
\"kubernetes.io/projected/b7485422-4238-4138-9b71-866a1315b330-kube-api-access-l4qrc\") pod \"certified-operators-dnjrt\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.146907 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.147657 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-catalog-content\") pod \"community-operators-hplsb\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.147707 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvkkh\" (UniqueName: \"kubernetes.io/projected/35e15c22-60f2-4df1-994c-368c65c4987a-kube-api-access-hvkkh\") pod \"community-operators-hplsb\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.147725 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-utilities\") pod \"community-operators-hplsb\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.230214 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-b7gs9"] Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.231126 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.248556 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-catalog-content\") pod \"community-operators-hplsb\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.248621 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvkkh\" (UniqueName: \"kubernetes.io/projected/35e15c22-60f2-4df1-994c-368c65c4987a-kube-api-access-hvkkh\") pod \"community-operators-hplsb\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.248643 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-utilities\") pod \"community-operators-hplsb\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.249048 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-utilities\") pod \"community-operators-hplsb\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.249396 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-catalog-content\") pod \"community-operators-hplsb\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.261128 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 04:13:50 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld Nov 29 04:13:50 crc kubenswrapper[4631]: [+]process-running ok Nov 29 04:13:50 crc kubenswrapper[4631]: healthz check failed Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.261188 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.285052 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvkkh\" (UniqueName: \"kubernetes.io/projected/35e15c22-60f2-4df1-994c-368c65c4987a-kube-api-access-hvkkh\") pod \"community-operators-hplsb\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.288136 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b7gs9"] Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.347027 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.349490 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-utilities\") pod \"certified-operators-b7gs9\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") " pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.349567 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-catalog-content\") pod \"certified-operators-b7gs9\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") " pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.349601 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mc89d\" (UniqueName: \"kubernetes.io/projected/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-kube-api-access-mc89d\") pod \"certified-operators-b7gs9\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") " pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.449070 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4lx6m"] Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.449952 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.450475 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-utilities\") pod \"certified-operators-b7gs9\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") " pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.450540 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-catalog-content\") pod \"certified-operators-b7gs9\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") " pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.450571 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mc89d\" (UniqueName: \"kubernetes.io/projected/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-kube-api-access-mc89d\") pod \"certified-operators-b7gs9\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") " pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.451258 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-utilities\") pod \"certified-operators-b7gs9\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") " pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.451574 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-catalog-content\") pod \"certified-operators-b7gs9\" (UID: 
\"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") " pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.480648 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4lx6m"] Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.506393 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mc89d\" (UniqueName: \"kubernetes.io/projected/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-kube-api-access-mc89d\") pod \"certified-operators-b7gs9\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") " pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.544855 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b7gs9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.552940 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-utilities\") pod \"community-operators-4lx6m\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.552981 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-catalog-content\") pod \"community-operators-4lx6m\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.553048 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx8q9\" (UniqueName: \"kubernetes.io/projected/f623e07c-73c0-4a97-a41f-cc5435e86ecb-kube-api-access-zx8q9\") pod \"community-operators-4lx6m\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.660707 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-catalog-content\") pod \"community-operators-4lx6m\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.660832 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx8q9\" (UniqueName: \"kubernetes.io/projected/f623e07c-73c0-4a97-a41f-cc5435e86ecb-kube-api-access-zx8q9\") pod \"community-operators-4lx6m\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.660900 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-utilities\") pod \"community-operators-4lx6m\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.661586 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-utilities\") pod 
\"community-operators-4lx6m\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.661742 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-catalog-content\") pod \"community-operators-4lx6m\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.718257 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zx8q9\" (UniqueName: \"kubernetes.io/projected/f623e07c-73c0-4a97-a41f-cc5435e86ecb-kube-api-access-zx8q9\") pod \"community-operators-4lx6m\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.720494 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.720542 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.728104 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.728133 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.752234 4631 patch_prober.go:28] interesting pod/apiserver-76f77b778f-7lch5 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]log ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]etcd ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/generic-apiserver-start-informers ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/max-in-flight-filter ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 29 04:13:50 crc kubenswrapper[4631]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/project.openshift.io-projectcache ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/openshift.io-startinformers ok Nov 29 04:13:50 crc kubenswrapper[4631]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 29 04:13:50 
crc kubenswrapper[4631]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 29 04:13:50 crc kubenswrapper[4631]: livez check failed Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.752297 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-7lch5" podUID="386dd0c3-88ce-4690-978a-0ecd6f029d5c" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.761779 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.814815 4631 patch_prober.go:28] interesting pod/downloads-7954f5f757-r9xmz container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.814864 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-r9xmz" podUID="01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.814817 4631 patch_prober.go:28] interesting pod/downloads-7954f5f757-r9xmz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.815063 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r9xmz" podUID="01cc7cce-92f8-44d1-9dd3-2d0b0742b3a8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.834660 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.835577 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.846329 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dnjrt"] Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.850166 4631 patch_prober.go:28] interesting pod/console-f9d7485db-896b9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.24:8443/health\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 29 04:13:50 crc kubenswrapper[4631]: I1129 04:13:50.850289 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-896b9" podUID="002a7abb-c9ed-4ae8-92da-b4985ff0643c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.24:8443/health\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.026201 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" 
event={"ID":"4977eaff-2a0a-4dac-8d85-bc2a207254cb","Type":"ContainerStarted","Data":"1d2a63babab2f706b5cc29e0da43961b16c441a081882c5680ddd3059a07fe15"} Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.036548 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnjrt" event={"ID":"b7485422-4238-4138-9b71-866a1315b330","Type":"ContainerStarted","Data":"e96e9b1640159d5f5030507a5d3493f3c7cbddc8ad041608cd262bb3d02260a7"} Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.064287 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8h4ns"] Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.069639 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-zsmnf" podStartSLOduration=13.069624466 podStartE2EDuration="13.069624466s" podCreationTimestamp="2025-11-29 04:13:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:51.061626899 +0000 UTC m=+158.126130423" watchObservedRunningTime="2025-11-29 04:13:51.069624466 +0000 UTC m=+158.134127970" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.241235 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.241733 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b7gs9"] Nov 29 04:13:51 crc kubenswrapper[4631]: W1129 04:13:51.248467 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57bcc661_05dc_4e34_9b78_ecc0d6f5f881.slice/crio-4dd058c919307be3c0df0752535c68ca940a436c0fd05a3e457db06ffe6bac22 WatchSource:0}: Error finding container 4dd058c919307be3c0df0752535c68ca940a436c0fd05a3e457db06ffe6bac22: Status 404 returned error can't find the container with id 4dd058c919307be3c0df0752535c68ca940a436c0fd05a3e457db06ffe6bac22 Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.249505 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-pbcfc" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.254709 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 04:13:51 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld Nov 29 04:13:51 crc kubenswrapper[4631]: [+]process-running ok Nov 29 04:13:51 crc kubenswrapper[4631]: healthz check failed Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.254770 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.319408 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hplsb"] Nov 29 04:13:51 crc kubenswrapper[4631]: W1129 04:13:51.342320 4631 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35e15c22_60f2_4df1_994c_368c65c4987a.slice/crio-014e6f90a26fd3778feed9758ea664655d77a073ea33c0e02013155259753763 WatchSource:0}: Error finding container 014e6f90a26fd3778feed9758ea664655d77a073ea33c0e02013155259753763: Status 404 returned error can't find the container with id 014e6f90a26fd3778feed9758ea664655d77a073ea33c0e02013155259753763 Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.555840 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4lx6m"] Nov 29 04:13:51 crc kubenswrapper[4631]: W1129 04:13:51.593547 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf623e07c_73c0_4a97_a41f_cc5435e86ecb.slice/crio-638355a7aa808abc84a33c87bfbf289be3562c0a7278315e38f7539dfed2e6d8 WatchSource:0}: Error finding container 638355a7aa808abc84a33c87bfbf289be3562c0a7278315e38f7539dfed2e6d8: Status 404 returned error can't find the container with id 638355a7aa808abc84a33c87bfbf289be3562c0a7278315e38f7539dfed2e6d8 Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.781605 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-4l8kb" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.823221 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-x9ffd"] Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.824179 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.828096 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.845432 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9ffd"] Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.884976 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-utilities\") pod \"redhat-marketplace-x9ffd\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.885250 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gbtp\" (UniqueName: \"kubernetes.io/projected/4441569d-edba-4636-b54b-fcfc59f1cd3f-kube-api-access-6gbtp\") pod \"redhat-marketplace-x9ffd\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.885287 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-catalog-content\") pod \"redhat-marketplace-x9ffd\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.902978 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 
04:13:51.922541 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2jd8q" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.986414 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-catalog-content\") pod \"redhat-marketplace-x9ffd\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.986543 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-utilities\") pod \"redhat-marketplace-x9ffd\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.986564 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gbtp\" (UniqueName: \"kubernetes.io/projected/4441569d-edba-4636-b54b-fcfc59f1cd3f-kube-api-access-6gbtp\") pod \"redhat-marketplace-x9ffd\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.986998 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-catalog-content\") pod \"redhat-marketplace-x9ffd\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:51 crc kubenswrapper[4631]: I1129 04:13:51.987235 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-utilities\") pod \"redhat-marketplace-x9ffd\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.007107 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gbtp\" (UniqueName: \"kubernetes.io/projected/4441569d-edba-4636-b54b-fcfc59f1cd3f-kube-api-access-6gbtp\") pod \"redhat-marketplace-x9ffd\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.041605 4631 generic.go:334] "Generic (PLEG): container finished" podID="35e15c22-60f2-4df1-994c-368c65c4987a" containerID="97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73" exitCode=0 Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.041672 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hplsb" event={"ID":"35e15c22-60f2-4df1-994c-368c65c4987a","Type":"ContainerDied","Data":"97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.041698 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hplsb" event={"ID":"35e15c22-60f2-4df1-994c-368c65c4987a","Type":"ContainerStarted","Data":"014e6f90a26fd3778feed9758ea664655d77a073ea33c0e02013155259753763"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.042929 4631 generic.go:334] "Generic (PLEG): container finished" 
podID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerID="0ed539f005bbc90867cc0f97f445d10ae1a43040e24402b67702384add671884" exitCode=0 Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.042986 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7gs9" event={"ID":"57bcc661-05dc-4e34-9b78-ecc0d6f5f881","Type":"ContainerDied","Data":"0ed539f005bbc90867cc0f97f445d10ae1a43040e24402b67702384add671884"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.043012 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7gs9" event={"ID":"57bcc661-05dc-4e34-9b78-ecc0d6f5f881","Type":"ContainerStarted","Data":"4dd058c919307be3c0df0752535c68ca940a436c0fd05a3e457db06ffe6bac22"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.043869 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.044848 4631 generic.go:334] "Generic (PLEG): container finished" podID="b7485422-4238-4138-9b71-866a1315b330" containerID="5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270" exitCode=0 Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.044896 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnjrt" event={"ID":"b7485422-4238-4138-9b71-866a1315b330","Type":"ContainerDied","Data":"5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.046679 4631 generic.go:334] "Generic (PLEG): container finished" podID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerID="77cca9dca8de93790b3c774c5e28eef672ad3748b02c051d00199896c5340a42" exitCode=0 Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.046734 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lx6m" event={"ID":"f623e07c-73c0-4a97-a41f-cc5435e86ecb","Type":"ContainerDied","Data":"77cca9dca8de93790b3c774c5e28eef672ad3748b02c051d00199896c5340a42"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.046750 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lx6m" event={"ID":"f623e07c-73c0-4a97-a41f-cc5435e86ecb","Type":"ContainerStarted","Data":"638355a7aa808abc84a33c87bfbf289be3562c0a7278315e38f7539dfed2e6d8"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.048073 4631 generic.go:334] "Generic (PLEG): container finished" podID="78552157-0b5f-437a-988a-71805a812ab2" containerID="69dc560d11ca0b45c0f2bcbea2b6c8f2a41940a43fb4bd5cfbc5ddea9af4bb14" exitCode=0 Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.048137 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" event={"ID":"78552157-0b5f-437a-988a-71805a812ab2","Type":"ContainerDied","Data":"69dc560d11ca0b45c0f2bcbea2b6c8f2a41940a43fb4bd5cfbc5ddea9af4bb14"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.050443 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" event={"ID":"171d32d8-1dcb-497d-9724-d798414c5602","Type":"ContainerStarted","Data":"f3a77794fb6b523a46952be09bac56829ce220363132612ae2381218692b0788"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.050477 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" 
event={"ID":"171d32d8-1dcb-497d-9724-d798414c5602","Type":"ContainerStarted","Data":"3f10412e3248c4e07707f7ae1bb83cc7dbf20c2df1f059168171e1121fa4a798"} Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.050585 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.142964 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" podStartSLOduration=141.142949082 podStartE2EDuration="2m21.142949082s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:52.141778893 +0000 UTC m=+159.206282407" watchObservedRunningTime="2025-11-29 04:13:52.142949082 +0000 UTC m=+159.207452596" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.145212 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.216595 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xt8sx"] Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.217674 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.248670 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xt8sx"] Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.252604 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 04:13:52 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld Nov 29 04:13:52 crc kubenswrapper[4631]: [+]process-running ok Nov 29 04:13:52 crc kubenswrapper[4631]: healthz check failed Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.252652 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.272981 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.273886 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.279675 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.279952 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.293521 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jghbc\" (UniqueName: \"kubernetes.io/projected/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-kube-api-access-jghbc\") pod \"redhat-marketplace-xt8sx\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.293604 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-catalog-content\") pod \"redhat-marketplace-xt8sx\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.293640 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-utilities\") pod \"redhat-marketplace-xt8sx\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.293529 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.395256 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-utilities\") pod \"redhat-marketplace-xt8sx\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.395304 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.395346 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.395383 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jghbc\" (UniqueName: \"kubernetes.io/projected/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-kube-api-access-jghbc\") pod \"redhat-marketplace-xt8sx\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc 
kubenswrapper[4631]: I1129 04:13:52.395565 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-catalog-content\") pod \"redhat-marketplace-xt8sx\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.395735 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-utilities\") pod \"redhat-marketplace-xt8sx\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.395956 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-catalog-content\") pod \"redhat-marketplace-xt8sx\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.424169 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jghbc\" (UniqueName: \"kubernetes.io/projected/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-kube-api-access-jghbc\") pod \"redhat-marketplace-xt8sx\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.497166 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.497230 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.497311 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.513318 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.534044 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.604562 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.639117 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9ffd"] Nov 29 04:13:52 crc kubenswrapper[4631]: W1129 04:13:52.664351 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4441569d_edba_4636_b54b_fcfc59f1cd3f.slice/crio-72d3106bb1f1fb589e6859678734fef94f9958bd0f2180faa074c7a5ea60e0f9 WatchSource:0}: Error finding container 72d3106bb1f1fb589e6859678734fef94f9958bd0f2180faa074c7a5ea60e0f9: Status 404 returned error can't find the container with id 72d3106bb1f1fb589e6859678734fef94f9958bd0f2180faa074c7a5ea60e0f9 Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.729515 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xt8sx"] Nov 29 04:13:52 crc kubenswrapper[4631]: W1129 04:13:52.743393 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba2e9065_18a4_4fc3_b8f5_a69a2b04286e.slice/crio-f7641711d7577c210a1b4cb8c94b301993db65701d70eea5fdbbd945ec70754d WatchSource:0}: Error finding container f7641711d7577c210a1b4cb8c94b301993db65701d70eea5fdbbd945ec70754d: Status 404 returned error can't find the container with id f7641711d7577c210a1b4cb8c94b301993db65701d70eea5fdbbd945ec70754d Nov 29 04:13:52 crc kubenswrapper[4631]: I1129 04:13:52.818023 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.012200 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dq9ls"] Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.014651 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.016666 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.020733 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dq9ls"] Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.066970 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xt8sx" event={"ID":"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e","Type":"ContainerStarted","Data":"f7641711d7577c210a1b4cb8c94b301993db65701d70eea5fdbbd945ec70754d"} Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.067810 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840","Type":"ContainerStarted","Data":"bfcf6381b656283c21b5a6e597eef12fbacb2adc562927e20211631396bdcb90"} Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.069245 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9ffd" event={"ID":"4441569d-edba-4636-b54b-fcfc59f1cd3f","Type":"ContainerStarted","Data":"72d3106bb1f1fb589e6859678734fef94f9958bd0f2180faa074c7a5ea60e0f9"} Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.115294 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-utilities\") pod \"redhat-operators-dq9ls\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.115351 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9rvm\" (UniqueName: \"kubernetes.io/projected/8dc666e0-e138-4b85-9ecc-d6af453cdc05-kube-api-access-g9rvm\") pod \"redhat-operators-dq9ls\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.115416 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-catalog-content\") pod \"redhat-operators-dq9ls\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.220454 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-catalog-content\") pod \"redhat-operators-dq9ls\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.220797 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-utilities\") pod \"redhat-operators-dq9ls\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.220843 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-g9rvm\" (UniqueName: \"kubernetes.io/projected/8dc666e0-e138-4b85-9ecc-d6af453cdc05-kube-api-access-g9rvm\") pod \"redhat-operators-dq9ls\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.223104 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-catalog-content\") pod \"redhat-operators-dq9ls\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.223313 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-utilities\") pod \"redhat-operators-dq9ls\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.240851 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9rvm\" (UniqueName: \"kubernetes.io/projected/8dc666e0-e138-4b85-9ecc-d6af453cdc05-kube-api-access-g9rvm\") pod \"redhat-operators-dq9ls\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.250642 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 29 04:13:53 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld Nov 29 04:13:53 crc kubenswrapper[4631]: [+]process-running ok Nov 29 04:13:53 crc kubenswrapper[4631]: healthz check failed Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.250700 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.272179 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.366472 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.414357 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bqhps"] Nov 29 04:13:53 crc kubenswrapper[4631]: E1129 04:13:53.414535 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78552157-0b5f-437a-988a-71805a812ab2" containerName="collect-profiles" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.414546 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="78552157-0b5f-437a-988a-71805a812ab2" containerName="collect-profiles" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.414672 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="78552157-0b5f-437a-988a-71805a812ab2" containerName="collect-profiles" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.415641 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.422146 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/78552157-0b5f-437a-988a-71805a812ab2-config-volume\") pod \"78552157-0b5f-437a-988a-71805a812ab2\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.422208 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5v2p\" (UniqueName: \"kubernetes.io/projected/78552157-0b5f-437a-988a-71805a812ab2-kube-api-access-p5v2p\") pod \"78552157-0b5f-437a-988a-71805a812ab2\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.422241 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/78552157-0b5f-437a-988a-71805a812ab2-secret-volume\") pod \"78552157-0b5f-437a-988a-71805a812ab2\" (UID: \"78552157-0b5f-437a-988a-71805a812ab2\") " Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.425723 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bqhps"] Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.426391 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78552157-0b5f-437a-988a-71805a812ab2-config-volume" (OuterVolumeSpecName: "config-volume") pod "78552157-0b5f-437a-988a-71805a812ab2" (UID: "78552157-0b5f-437a-988a-71805a812ab2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.429100 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78552157-0b5f-437a-988a-71805a812ab2-kube-api-access-p5v2p" (OuterVolumeSpecName: "kube-api-access-p5v2p") pod "78552157-0b5f-437a-988a-71805a812ab2" (UID: "78552157-0b5f-437a-988a-71805a812ab2"). InnerVolumeSpecName "kube-api-access-p5v2p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.431302 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78552157-0b5f-437a-988a-71805a812ab2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "78552157-0b5f-437a-988a-71805a812ab2" (UID: "78552157-0b5f-437a-988a-71805a812ab2"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.524016 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-utilities\") pod \"redhat-operators-bqhps\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.524113 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wfsq\" (UniqueName: \"kubernetes.io/projected/51cb5ea4-3021-451c-b87e-b6f44de274c9-kube-api-access-5wfsq\") pod \"redhat-operators-bqhps\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.524159 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-catalog-content\") pod \"redhat-operators-bqhps\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.524739 4631 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/78552157-0b5f-437a-988a-71805a812ab2-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.524756 4631 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/78552157-0b5f-437a-988a-71805a812ab2-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.524775 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5v2p\" (UniqueName: \"kubernetes.io/projected/78552157-0b5f-437a-988a-71805a812ab2-kube-api-access-p5v2p\") on node \"crc\" DevicePath \"\"" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.615758 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dq9ls"] Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.626687 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-catalog-content\") pod \"redhat-operators-bqhps\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.626725 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-utilities\") pod \"redhat-operators-bqhps\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.626789 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wfsq\" (UniqueName: \"kubernetes.io/projected/51cb5ea4-3021-451c-b87e-b6f44de274c9-kube-api-access-5wfsq\") pod \"redhat-operators-bqhps\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.627147 4631 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-catalog-content\") pod \"redhat-operators-bqhps\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.627231 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-utilities\") pod \"redhat-operators-bqhps\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.647743 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wfsq\" (UniqueName: \"kubernetes.io/projected/51cb5ea4-3021-451c-b87e-b6f44de274c9-kube-api-access-5wfsq\") pod \"redhat-operators-bqhps\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.734477 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:13:53 crc kubenswrapper[4631]: I1129 04:13:53.963128 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bqhps"] Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.100702 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" event={"ID":"78552157-0b5f-437a-988a-71805a812ab2","Type":"ContainerDied","Data":"fc8f497743f2da67fe0c30654c68373427c57ebce040f07330c4e466e9ad2d5a"} Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.101784 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc8f497743f2da67fe0c30654c68373427c57ebce040f07330c4e466e9ad2d5a" Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.100743 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf" Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.116562 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.116904 4631 generic.go:334] "Generic (PLEG): container finished" podID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerID="d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff" exitCode=0 Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.117392 4631 util.go:30] "No sandbox for pod can be found. 
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.117320 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9ffd" event={"ID":"4441569d-edba-4636-b54b-fcfc59f1cd3f","Type":"ContainerDied","Data":"d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff"}
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.119810 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.120058 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.124176 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqhps" event={"ID":"51cb5ea4-3021-451c-b87e-b6f44de274c9","Type":"ContainerStarted","Data":"d14262909cc9d6b4d191d69c13b5557bd1b2c22577e5e16e8d126a7de5d0468e"}
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.125607 4631 generic.go:334] "Generic (PLEG): container finished" podID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerID="d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13" exitCode=0
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.125697 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dq9ls" event={"ID":"8dc666e0-e138-4b85-9ecc-d6af453cdc05","Type":"ContainerDied","Data":"d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13"}
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.125727 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dq9ls" event={"ID":"8dc666e0-e138-4b85-9ecc-d6af453cdc05","Type":"ContainerStarted","Data":"c916a61053fb8c6c83448e14f0f1d4464e518e75a59c1b075394b02cd9956178"}
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.127214 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.140561 4631 generic.go:334] "Generic (PLEG): container finished" podID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerID="c845bc47360c7bcd4b034d121fdc853718c06081905893f6f6c654c63acd2b8c" exitCode=0
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.140629 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xt8sx" event={"ID":"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e","Type":"ContainerDied","Data":"c845bc47360c7bcd4b034d121fdc853718c06081905893f6f6c654c63acd2b8c"}
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.147990 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840","Type":"ContainerStarted","Data":"70665991f3eebfc8c21de2b25597686631fbec4264a2b4fce979afdd9be73edf"}
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.213895 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.213868437 podStartE2EDuration="2.213868437s" podCreationTimestamp="2025-11-29 04:13:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:54.205975242 +0000 UTC m=+161.270478756" watchObservedRunningTime="2025-11-29 04:13:54.213868437 +0000 UTC m=+161.278371951"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.236265 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.236383 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.250367 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 29 04:13:54 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld
Nov 29 04:13:54 crc kubenswrapper[4631]: [+]process-running ok
Nov 29 04:13:54 crc kubenswrapper[4631]: healthz check failed
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.250424 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.337346 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.337446 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.337541 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.356166 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.507120 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.700748 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 29 04:13:54 crc kubenswrapper[4631]: W1129 04:13:54.717509 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod765bc44c_c6e5_4819_8a9b_8bf62427e2b6.slice/crio-242a60250577e0104179e5812071e7f5935c4a12accf51bb8ae04fe7b33035b4 WatchSource:0}: Error finding container 242a60250577e0104179e5812071e7f5935c4a12accf51bb8ae04fe7b33035b4: Status 404 returned error can't find the container with id 242a60250577e0104179e5812071e7f5935c4a12accf51bb8ae04fe7b33035b4
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.743743 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.749030 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c6c5bb91-f03c-4672-bc61-69a68b8c89d6-metrics-certs\") pod \"network-metrics-daemon-b6vgh\" (UID: \"c6c5bb91-f03c-4672-bc61-69a68b8c89d6\") " pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:13:54 crc kubenswrapper[4631]: I1129 04:13:54.887911 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6"
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.048492 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-b6vgh"
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.155387 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"765bc44c-c6e5-4819-8a9b-8bf62427e2b6","Type":"ContainerStarted","Data":"242a60250577e0104179e5812071e7f5935c4a12accf51bb8ae04fe7b33035b4"}
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.159430 4631 generic.go:334] "Generic (PLEG): container finished" podID="c13b5aa1-0275-4da5-b9c3-cf4bd4f21840" containerID="70665991f3eebfc8c21de2b25597686631fbec4264a2b4fce979afdd9be73edf" exitCode=0
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.159495 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840","Type":"ContainerDied","Data":"70665991f3eebfc8c21de2b25597686631fbec4264a2b4fce979afdd9be73edf"}
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.161666 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqhps" event={"ID":"51cb5ea4-3021-451c-b87e-b6f44de274c9","Type":"ContainerStarted","Data":"c826cc3f4f8184a01a3be93ddc877db817b009227385197bc1e7c5a7b5f72bf2"}
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.252044 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 29 04:13:55 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld
Nov 29 04:13:55 crc kubenswrapper[4631]: [+]process-running ok
Nov 29 04:13:55 crc kubenswrapper[4631]: healthz check failed
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.252086 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.282239 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-b6vgh"]
Nov 29 04:13:55 crc kubenswrapper[4631]: W1129 04:13:55.293500 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6c5bb91_f03c_4672_bc61_69a68b8c89d6.slice/crio-dcbeea4787a7c1d253e6b707c881b328464a4becffa497e290569ffd52cb7af4 WatchSource:0}: Error finding container dcbeea4787a7c1d253e6b707c881b328464a4becffa497e290569ffd52cb7af4: Status 404 returned error can't find the container with id dcbeea4787a7c1d253e6b707c881b328464a4becffa497e290569ffd52cb7af4
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.733156 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-7lch5"
Nov 29 04:13:55 crc kubenswrapper[4631]: I1129 04:13:55.737121 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-7lch5"
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.173632 4631 generic.go:334] "Generic (PLEG): container finished" podID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerID="c826cc3f4f8184a01a3be93ddc877db817b009227385197bc1e7c5a7b5f72bf2" exitCode=0
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.173901 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqhps" event={"ID":"51cb5ea4-3021-451c-b87e-b6f44de274c9","Type":"ContainerDied","Data":"c826cc3f4f8184a01a3be93ddc877db817b009227385197bc1e7c5a7b5f72bf2"}
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.178611 4631 generic.go:334] "Generic (PLEG): container finished" podID="765bc44c-c6e5-4819-8a9b-8bf62427e2b6" containerID="9d6541fa56c52cdd6ce2b4b818f29acabb9e39a32b2fb09d0451d93de51e57ce" exitCode=0
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.178673 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"765bc44c-c6e5-4819-8a9b-8bf62427e2b6","Type":"ContainerDied","Data":"9d6541fa56c52cdd6ce2b4b818f29acabb9e39a32b2fb09d0451d93de51e57ce"}
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.180523 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" event={"ID":"c6c5bb91-f03c-4672-bc61-69a68b8c89d6","Type":"ContainerStarted","Data":"5bd1269d4948d1bbddd6c1f7e95243a0adfe31063ea11aea9806cecb2a7a1746"}
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.180578 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" event={"ID":"c6c5bb91-f03c-4672-bc61-69a68b8c89d6","Type":"ContainerStarted","Data":"dcbeea4787a7c1d253e6b707c881b328464a4becffa497e290569ffd52cb7af4"}
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.250815 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 29 04:13:56 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld
Nov 29 04:13:56 crc kubenswrapper[4631]: [+]process-running ok
Nov 29 04:13:56 crc kubenswrapper[4631]: healthz check failed
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.250881 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.409582 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.468451 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kubelet-dir\") pod \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\" (UID: \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\") "
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.468605 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kube-api-access\") pod \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\" (UID: \"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840\") "
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.468741 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c13b5aa1-0275-4da5-b9c3-cf4bd4f21840" (UID: "c13b5aa1-0275-4da5-b9c3-cf4bd4f21840"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.490203 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c13b5aa1-0275-4da5-b9c3-cf4bd4f21840" (UID: "c13b5aa1-0275-4da5-b9c3-cf4bd4f21840"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.569867 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 29 04:13:56 crc kubenswrapper[4631]: I1129 04:13:56.569896 4631 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c13b5aa1-0275-4da5-b9c3-cf4bd4f21840-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.010689 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-lsnw8"
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.185842 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-b6vgh" event={"ID":"c6c5bb91-f03c-4672-bc61-69a68b8c89d6","Type":"ContainerStarted","Data":"64601973c5789714197a7c98d49d3fadb89019af649201178787ec3bf6e2116f"}
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.187656 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.188532 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c13b5aa1-0275-4da5-b9c3-cf4bd4f21840","Type":"ContainerDied","Data":"bfcf6381b656283c21b5a6e597eef12fbacb2adc562927e20211631396bdcb90"}
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.188582 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfcf6381b656283c21b5a6e597eef12fbacb2adc562927e20211631396bdcb90"
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.250003 4631 patch_prober.go:28] interesting pod/router-default-5444994796-pbcfc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 29 04:13:57 crc kubenswrapper[4631]: [-]has-synced failed: reason withheld
Nov 29 04:13:57 crc kubenswrapper[4631]: [+]process-running ok
Nov 29 04:13:57 crc kubenswrapper[4631]: healthz check failed
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.250065 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pbcfc" podUID="e4d8dab1-39ad-4c93-a452-5ecf8afda237" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.394567 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.484587 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kube-api-access\") pod \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\" (UID: \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\") "
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.484683 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kubelet-dir\") pod \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\" (UID: \"765bc44c-c6e5-4819-8a9b-8bf62427e2b6\") "
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.484888 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "765bc44c-c6e5-4819-8a9b-8bf62427e2b6" (UID: "765bc44c-c6e5-4819-8a9b-8bf62427e2b6"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.504935 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "765bc44c-c6e5-4819-8a9b-8bf62427e2b6" (UID: "765bc44c-c6e5-4819-8a9b-8bf62427e2b6"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.586130 4631 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 29 04:13:57 crc kubenswrapper[4631]: I1129 04:13:57.586167 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/765bc44c-c6e5-4819-8a9b-8bf62427e2b6-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 29 04:13:58 crc kubenswrapper[4631]: I1129 04:13:58.213236 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 29 04:13:58 crc kubenswrapper[4631]: I1129 04:13:58.214849 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"765bc44c-c6e5-4819-8a9b-8bf62427e2b6","Type":"ContainerDied","Data":"242a60250577e0104179e5812071e7f5935c4a12accf51bb8ae04fe7b33035b4"}
Nov 29 04:13:58 crc kubenswrapper[4631]: I1129 04:13:58.214883 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="242a60250577e0104179e5812071e7f5935c4a12accf51bb8ae04fe7b33035b4"
Nov 29 04:13:58 crc kubenswrapper[4631]: I1129 04:13:58.233800 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-b6vgh" podStartSLOduration=147.233786118 podStartE2EDuration="2m27.233786118s" podCreationTimestamp="2025-11-29 04:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:13:58.227556345 +0000 UTC m=+165.292059859" watchObservedRunningTime="2025-11-29 04:13:58.233786118 +0000 UTC m=+165.298289632"
Nov 29 04:13:58 crc kubenswrapper[4631]: I1129 04:13:58.250663 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-pbcfc"
Nov 29 04:13:58 crc kubenswrapper[4631]: I1129 04:13:58.252582 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-pbcfc"
Nov 29 04:14:00 crc kubenswrapper[4631]: I1129 04:14:00.814884 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-r9xmz"
Nov 29 04:14:00 crc kubenswrapper[4631]: I1129 04:14:00.860464 4631 patch_prober.go:28] interesting pod/console-f9d7485db-896b9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.24:8443/health\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body=
Nov 29 04:14:00 crc kubenswrapper[4631]: I1129 04:14:00.860537 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-896b9" podUID="002a7abb-c9ed-4ae8-92da-b4985ff0643c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.24:8443/health\": dial tcp 10.217.0.24:8443: connect: connection refused"
Nov 29 04:14:09 crc kubenswrapper[4631]: I1129 04:14:09.869420 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns"
Nov 29 04:14:10 crc kubenswrapper[4631]: I1129 04:14:10.837365 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-896b9"
Nov 29 04:14:10 crc kubenswrapper[4631]: I1129 04:14:10.855133 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-896b9"
Nov 29 04:14:20 crc kubenswrapper[4631]: I1129 04:14:20.716786 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 04:14:20 crc kubenswrapper[4631]: I1129 04:14:20.717605 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:14:21 crc kubenswrapper[4631]: I1129 04:14:21.836478 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-pspvc" Nov 29 04:14:21 crc kubenswrapper[4631]: I1129 04:14:21.878496 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 29 04:14:23 crc kubenswrapper[4631]: E1129 04:14:23.619932 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 29 04:14:23 crc kubenswrapper[4631]: E1129 04:14:23.620075 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zx8q9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-4lx6m_openshift-marketplace(f623e07c-73c0-4a97-a41f-cc5435e86ecb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 04:14:23 crc kubenswrapper[4631]: E1129 04:14:23.621388 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-4lx6m" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" Nov 29 04:14:23 crc kubenswrapper[4631]: E1129 04:14:23.642530 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 29 04:14:23 crc kubenswrapper[4631]: E1129 04:14:23.642648 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hvkkh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-hplsb_openshift-marketplace(35e15c22-60f2-4df1-994c-368c65c4987a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 04:14:23 crc kubenswrapper[4631]: E1129 04:14:23.644223 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-hplsb" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" Nov 29 04:14:26 crc kubenswrapper[4631]: E1129 04:14:26.690537 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-hplsb" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" Nov 29 04:14:27 crc kubenswrapper[4631]: E1129 04:14:27.149749 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 29 04:14:27 crc kubenswrapper[4631]: E1129 04:14:27.149902 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
Nov 29 04:14:27 crc kubenswrapper[4631]: E1129 04:14:27.151245 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-dnjrt" podUID="b7485422-4238-4138-9b71-866a1315b330"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.101864 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 29 04:14:28 crc kubenswrapper[4631]: E1129 04:14:28.102109 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c13b5aa1-0275-4da5-b9c3-cf4bd4f21840" containerName="pruner"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.102124 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="c13b5aa1-0275-4da5-b9c3-cf4bd4f21840" containerName="pruner"
Nov 29 04:14:28 crc kubenswrapper[4631]: E1129 04:14:28.102133 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="765bc44c-c6e5-4819-8a9b-8bf62427e2b6" containerName="pruner"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.102141 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="765bc44c-c6e5-4819-8a9b-8bf62427e2b6" containerName="pruner"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.102259 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="c13b5aa1-0275-4da5-b9c3-cf4bd4f21840" containerName="pruner"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.102273 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="765bc44c-c6e5-4819-8a9b-8bf62427e2b6" containerName="pruner"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.102846 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.106596 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.107014 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.126298 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.269459 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fb12c1e3-1750-4c01-a95b-5009584a0a88-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fb12c1e3-1750-4c01-a95b-5009584a0a88\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.269497 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fb12c1e3-1750-4c01-a95b-5009584a0a88-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fb12c1e3-1750-4c01-a95b-5009584a0a88\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.370974 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fb12c1e3-1750-4c01-a95b-5009584a0a88-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fb12c1e3-1750-4c01-a95b-5009584a0a88\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.371008 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fb12c1e3-1750-4c01-a95b-5009584a0a88-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fb12c1e3-1750-4c01-a95b-5009584a0a88\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.371382 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fb12c1e3-1750-4c01-a95b-5009584a0a88-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fb12c1e3-1750-4c01-a95b-5009584a0a88\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.389430 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fb12c1e3-1750-4c01-a95b-5009584a0a88-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fb12c1e3-1750-4c01-a95b-5009584a0a88\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 04:14:28 crc kubenswrapper[4631]: I1129 04:14:28.437056 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 29 04:14:29 crc kubenswrapper[4631]: E1129 04:14:29.392019 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-4lx6m" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb"
Nov 29 04:14:29 crc kubenswrapper[4631]: E1129 04:14:29.392319 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-dnjrt" podUID="b7485422-4238-4138-9b71-866a1315b330"
Nov 29 04:14:29 crc kubenswrapper[4631]: E1129 04:14:29.402887 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 29 04:14:29 crc kubenswrapper[4631]: E1129 04:14:29.404140 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jghbc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-xt8sx_openshift-marketplace(ba2e9065-18a4-4fc3-b8f5-a69a2b04286e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 04:14:29 crc kubenswrapper[4631]: E1129 04:14:29.405374 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-xt8sx" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e"
Nov 29 04:14:32 crc kubenswrapper[4631]: E1129 04:14:32.976540 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-xt8sx" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e"
Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.038586 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.039209 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5wfsq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-bqhps_openshift-marketplace(51cb5ea4-3021-451c-b87e-b6f44de274c9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.040475 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-bqhps" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9"
Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.097394 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.098529 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.104321 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.173908 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.174033 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mc89d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-b7gs9_openshift-marketplace(57bcc661-05dc-4e34-9b78-ecc0d6f5f881): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.175427 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-b7gs9" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.223507 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.223910 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g9rvm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dq9ls_openshift-marketplace(8dc666e0-e138-4b85-9ecc-d6af453cdc05): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.225235 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-dq9ls" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.232682 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-var-lock\") pod \"installer-9-crc\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.232718 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kube-api-access\") pod \"installer-9-crc\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.232771 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kubelet-dir\") pod \"installer-9-crc\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.334107 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kubelet-dir\") pod \"installer-9-crc\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: 
I1129 04:14:33.334174 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-var-lock\") pod \"installer-9-crc\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.334196 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kube-api-access\") pod \"installer-9-crc\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.334229 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kubelet-dir\") pod \"installer-9-crc\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.334268 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-var-lock\") pod \"installer-9-crc\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.350602 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kube-api-access\") pod \"installer-9-crc\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.427921 4631 generic.go:334] "Generic (PLEG): container finished" podID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerID="01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db" exitCode=0 Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.428857 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9ffd" event={"ID":"4441569d-edba-4636-b54b-fcfc59f1cd3f","Type":"ContainerDied","Data":"01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db"} Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.429546 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b7gs9" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" Nov 29 04:14:33 crc kubenswrapper[4631]: E1129 04:14:33.429888 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-bqhps" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.430082 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.494427 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 29 04:14:33 crc kubenswrapper[4631]: W1129 04:14:33.508082 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podfb12c1e3_1750_4c01_a95b_5009584a0a88.slice/crio-6d240dfa7bc172efeddafda587dcfc6f97dcab8c947821e39166297bb1affdc1 WatchSource:0}: Error finding container 6d240dfa7bc172efeddafda587dcfc6f97dcab8c947821e39166297bb1affdc1: Status 404 returned error can't find the container with id 6d240dfa7bc172efeddafda587dcfc6f97dcab8c947821e39166297bb1affdc1 Nov 29 04:14:33 crc kubenswrapper[4631]: I1129 04:14:33.851945 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 29 04:14:33 crc kubenswrapper[4631]: W1129 04:14:33.860288 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podf11b902c_2b11_4bb5_8cdd_67c739b6f90f.slice/crio-ff0469184df3f855563ef23165fa0d0f5abd3bb56a8526973e0dfbb675207006 WatchSource:0}: Error finding container ff0469184df3f855563ef23165fa0d0f5abd3bb56a8526973e0dfbb675207006: Status 404 returned error can't find the container with id ff0469184df3f855563ef23165fa0d0f5abd3bb56a8526973e0dfbb675207006 Nov 29 04:14:34 crc kubenswrapper[4631]: I1129 04:14:34.434397 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9ffd" event={"ID":"4441569d-edba-4636-b54b-fcfc59f1cd3f","Type":"ContainerStarted","Data":"57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027"} Nov 29 04:14:34 crc kubenswrapper[4631]: I1129 04:14:34.466421 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"f11b902c-2b11-4bb5-8cdd-67c739b6f90f","Type":"ContainerStarted","Data":"ca8d9c71f805c2cb718dcd6938ead07bfe13ba191bb9e9de0b692a9d9b5e8ef5"} Nov 29 04:14:34 crc kubenswrapper[4631]: I1129 04:14:34.466463 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"f11b902c-2b11-4bb5-8cdd-67c739b6f90f","Type":"ContainerStarted","Data":"ff0469184df3f855563ef23165fa0d0f5abd3bb56a8526973e0dfbb675207006"} Nov 29 04:14:34 crc kubenswrapper[4631]: I1129 04:14:34.468154 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fb12c1e3-1750-4c01-a95b-5009584a0a88","Type":"ContainerStarted","Data":"c246509c6d3479933ce57898f6b33e5df70df7a3b56ab6a8cb80104d44ef933b"} Nov 29 04:14:34 crc kubenswrapper[4631]: I1129 04:14:34.468176 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fb12c1e3-1750-4c01-a95b-5009584a0a88","Type":"ContainerStarted","Data":"6d240dfa7bc172efeddafda587dcfc6f97dcab8c947821e39166297bb1affdc1"} Nov 29 04:14:34 crc kubenswrapper[4631]: I1129 04:14:34.479528 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-x9ffd" podStartSLOduration=3.402817367 podStartE2EDuration="43.479508436s" podCreationTimestamp="2025-11-29 04:13:51 +0000 UTC" firstStartedPulling="2025-11-29 04:13:54.128859951 +0000 UTC m=+161.193363455" lastFinishedPulling="2025-11-29 04:14:34.20555101 +0000 UTC m=+201.270054524" observedRunningTime="2025-11-29 04:14:34.478528251 +0000 UTC 
m=+201.543031765" watchObservedRunningTime="2025-11-29 04:14:34.479508436 +0000 UTC m=+201.544011950" Nov 29 04:14:34 crc kubenswrapper[4631]: I1129 04:14:34.495828 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=6.495812631 podStartE2EDuration="6.495812631s" podCreationTimestamp="2025-11-29 04:14:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:14:34.492608631 +0000 UTC m=+201.557112145" watchObservedRunningTime="2025-11-29 04:14:34.495812631 +0000 UTC m=+201.560316135" Nov 29 04:14:34 crc kubenswrapper[4631]: I1129 04:14:34.507055 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=1.50702704 podStartE2EDuration="1.50702704s" podCreationTimestamp="2025-11-29 04:14:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:14:34.502871007 +0000 UTC m=+201.567374521" watchObservedRunningTime="2025-11-29 04:14:34.50702704 +0000 UTC m=+201.571530554" Nov 29 04:14:35 crc kubenswrapper[4631]: I1129 04:14:35.473857 4631 generic.go:334] "Generic (PLEG): container finished" podID="fb12c1e3-1750-4c01-a95b-5009584a0a88" containerID="c246509c6d3479933ce57898f6b33e5df70df7a3b56ab6a8cb80104d44ef933b" exitCode=0 Nov 29 04:14:35 crc kubenswrapper[4631]: I1129 04:14:35.473929 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fb12c1e3-1750-4c01-a95b-5009584a0a88","Type":"ContainerDied","Data":"c246509c6d3479933ce57898f6b33e5df70df7a3b56ab6a8cb80104d44ef933b"} Nov 29 04:14:36 crc kubenswrapper[4631]: I1129 04:14:36.746172 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 29 04:14:36 crc kubenswrapper[4631]: I1129 04:14:36.899923 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fb12c1e3-1750-4c01-a95b-5009584a0a88-kubelet-dir\") pod \"fb12c1e3-1750-4c01-a95b-5009584a0a88\" (UID: \"fb12c1e3-1750-4c01-a95b-5009584a0a88\") " Nov 29 04:14:36 crc kubenswrapper[4631]: I1129 04:14:36.900020 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fb12c1e3-1750-4c01-a95b-5009584a0a88-kube-api-access\") pod \"fb12c1e3-1750-4c01-a95b-5009584a0a88\" (UID: \"fb12c1e3-1750-4c01-a95b-5009584a0a88\") " Nov 29 04:14:36 crc kubenswrapper[4631]: I1129 04:14:36.900449 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fb12c1e3-1750-4c01-a95b-5009584a0a88-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "fb12c1e3-1750-4c01-a95b-5009584a0a88" (UID: "fb12c1e3-1750-4c01-a95b-5009584a0a88"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:14:36 crc kubenswrapper[4631]: I1129 04:14:36.906640 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb12c1e3-1750-4c01-a95b-5009584a0a88-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "fb12c1e3-1750-4c01-a95b-5009584a0a88" (UID: "fb12c1e3-1750-4c01-a95b-5009584a0a88"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:14:37 crc kubenswrapper[4631]: I1129 04:14:37.001723 4631 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fb12c1e3-1750-4c01-a95b-5009584a0a88-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 29 04:14:37 crc kubenswrapper[4631]: I1129 04:14:37.002084 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fb12c1e3-1750-4c01-a95b-5009584a0a88-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 29 04:14:37 crc kubenswrapper[4631]: I1129 04:14:37.488773 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fb12c1e3-1750-4c01-a95b-5009584a0a88","Type":"ContainerDied","Data":"6d240dfa7bc172efeddafda587dcfc6f97dcab8c947821e39166297bb1affdc1"} Nov 29 04:14:37 crc kubenswrapper[4631]: I1129 04:14:37.488816 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d240dfa7bc172efeddafda587dcfc6f97dcab8c947821e39166297bb1affdc1" Nov 29 04:14:37 crc kubenswrapper[4631]: I1129 04:14:37.488880 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 29 04:14:42 crc kubenswrapper[4631]: I1129 04:14:42.145501 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:14:42 crc kubenswrapper[4631]: I1129 04:14:42.146005 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:14:42 crc kubenswrapper[4631]: I1129 04:14:42.204901 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:14:42 crc kubenswrapper[4631]: I1129 04:14:42.515186 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hplsb" event={"ID":"35e15c22-60f2-4df1-994c-368c65c4987a","Type":"ContainerStarted","Data":"01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22"} Nov 29 04:14:42 crc kubenswrapper[4631]: I1129 04:14:42.517631 4631 generic.go:334] "Generic (PLEG): container finished" podID="b7485422-4238-4138-9b71-866a1315b330" containerID="78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d" exitCode=0 Nov 29 04:14:42 crc kubenswrapper[4631]: I1129 04:14:42.517707 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnjrt" event={"ID":"b7485422-4238-4138-9b71-866a1315b330","Type":"ContainerDied","Data":"78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d"} Nov 29 04:14:42 crc kubenswrapper[4631]: I1129 04:14:42.557960 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:14:43 crc kubenswrapper[4631]: I1129 04:14:43.525616 4631 generic.go:334] "Generic (PLEG): container finished" podID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerID="65ab546a2b2db7c3ae8341577b3d48086f64ae1f4a494dcc24e3dcd0c315fdba" exitCode=0 Nov 29 04:14:43 crc kubenswrapper[4631]: I1129 04:14:43.526015 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lx6m" 
event={"ID":"f623e07c-73c0-4a97-a41f-cc5435e86ecb","Type":"ContainerDied","Data":"65ab546a2b2db7c3ae8341577b3d48086f64ae1f4a494dcc24e3dcd0c315fdba"} Nov 29 04:14:43 crc kubenswrapper[4631]: I1129 04:14:43.540428 4631 generic.go:334] "Generic (PLEG): container finished" podID="35e15c22-60f2-4df1-994c-368c65c4987a" containerID="01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22" exitCode=0 Nov 29 04:14:43 crc kubenswrapper[4631]: I1129 04:14:43.541118 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hplsb" event={"ID":"35e15c22-60f2-4df1-994c-368c65c4987a","Type":"ContainerDied","Data":"01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22"} Nov 29 04:14:44 crc kubenswrapper[4631]: I1129 04:14:44.547673 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hplsb" event={"ID":"35e15c22-60f2-4df1-994c-368c65c4987a","Type":"ContainerStarted","Data":"bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc"} Nov 29 04:14:44 crc kubenswrapper[4631]: I1129 04:14:44.560604 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnjrt" event={"ID":"b7485422-4238-4138-9b71-866a1315b330","Type":"ContainerStarted","Data":"5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656"} Nov 29 04:14:44 crc kubenswrapper[4631]: I1129 04:14:44.562440 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lx6m" event={"ID":"f623e07c-73c0-4a97-a41f-cc5435e86ecb","Type":"ContainerStarted","Data":"d4f50cc1ae3ca02dad4e341c1f12901ae8e87ea250b5cd9ab3a6aa38661af4ec"} Nov 29 04:14:44 crc kubenswrapper[4631]: I1129 04:14:44.573324 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hplsb" podStartSLOduration=2.354397944 podStartE2EDuration="54.573307439s" podCreationTimestamp="2025-11-29 04:13:50 +0000 UTC" firstStartedPulling="2025-11-29 04:13:52.043601762 +0000 UTC m=+159.108105276" lastFinishedPulling="2025-11-29 04:14:44.262511247 +0000 UTC m=+211.327014771" observedRunningTime="2025-11-29 04:14:44.572671943 +0000 UTC m=+211.637175457" watchObservedRunningTime="2025-11-29 04:14:44.573307439 +0000 UTC m=+211.637810963" Nov 29 04:14:44 crc kubenswrapper[4631]: I1129 04:14:44.597647 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4lx6m" podStartSLOduration=2.430404725 podStartE2EDuration="54.597627814s" podCreationTimestamp="2025-11-29 04:13:50 +0000 UTC" firstStartedPulling="2025-11-29 04:13:52.047457198 +0000 UTC m=+159.111960712" lastFinishedPulling="2025-11-29 04:14:44.214680297 +0000 UTC m=+211.279183801" observedRunningTime="2025-11-29 04:14:44.596798624 +0000 UTC m=+211.661302138" watchObservedRunningTime="2025-11-29 04:14:44.597627814 +0000 UTC m=+211.662131328" Nov 29 04:14:44 crc kubenswrapper[4631]: I1129 04:14:44.613762 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dnjrt" podStartSLOduration=4.106979657 podStartE2EDuration="55.613748735s" podCreationTimestamp="2025-11-29 04:13:49 +0000 UTC" firstStartedPulling="2025-11-29 04:13:52.04591608 +0000 UTC m=+159.110419594" lastFinishedPulling="2025-11-29 04:14:43.552685158 +0000 UTC m=+210.617188672" observedRunningTime="2025-11-29 04:14:44.612501974 +0000 UTC m=+211.677005498" watchObservedRunningTime="2025-11-29 
04:14:44.613748735 +0000 UTC m=+211.678252249" Nov 29 04:14:45 crc kubenswrapper[4631]: I1129 04:14:45.568868 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqhps" event={"ID":"51cb5ea4-3021-451c-b87e-b6f44de274c9","Type":"ContainerStarted","Data":"c1ff0678e8198a7a1767f5d7b820e368a5ec451578badaa7a4eaee1d374cf636"} Nov 29 04:14:46 crc kubenswrapper[4631]: I1129 04:14:46.574294 4631 generic.go:334] "Generic (PLEG): container finished" podID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerID="c1ff0678e8198a7a1767f5d7b820e368a5ec451578badaa7a4eaee1d374cf636" exitCode=0 Nov 29 04:14:46 crc kubenswrapper[4631]: I1129 04:14:46.574348 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqhps" event={"ID":"51cb5ea4-3021-451c-b87e-b6f44de274c9","Type":"ContainerDied","Data":"c1ff0678e8198a7a1767f5d7b820e368a5ec451578badaa7a4eaee1d374cf636"} Nov 29 04:14:47 crc kubenswrapper[4631]: I1129 04:14:47.581004 4631 generic.go:334] "Generic (PLEG): container finished" podID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerID="e799c40b263589da94b46a5be7095abf56914ec4c374f985d26d3cff470bd8f7" exitCode=0 Nov 29 04:14:47 crc kubenswrapper[4631]: I1129 04:14:47.581061 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7gs9" event={"ID":"57bcc661-05dc-4e34-9b78-ecc0d6f5f881","Type":"ContainerDied","Data":"e799c40b263589da94b46a5be7095abf56914ec4c374f985d26d3cff470bd8f7"} Nov 29 04:14:48 crc kubenswrapper[4631]: I1129 04:14:48.587692 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqhps" event={"ID":"51cb5ea4-3021-451c-b87e-b6f44de274c9","Type":"ContainerStarted","Data":"0f551a0a0e67c364fc988aa8abdab8eaa31b3f2b353c10b5bfbc6cc9f12b8dbb"} Nov 29 04:14:48 crc kubenswrapper[4631]: I1129 04:14:48.603915 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bqhps" podStartSLOduration=3.932354234 podStartE2EDuration="55.603899521s" podCreationTimestamp="2025-11-29 04:13:53 +0000 UTC" firstStartedPulling="2025-11-29 04:13:56.17605382 +0000 UTC m=+163.240557334" lastFinishedPulling="2025-11-29 04:14:47.847599107 +0000 UTC m=+214.912102621" observedRunningTime="2025-11-29 04:14:48.600540148 +0000 UTC m=+215.665043662" watchObservedRunningTime="2025-11-29 04:14:48.603899521 +0000 UTC m=+215.668403035" Nov 29 04:14:49 crc kubenswrapper[4631]: I1129 04:14:49.594808 4631 generic.go:334] "Generic (PLEG): container finished" podID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerID="12c56536a8fe481ae17332bebb1c3667e2f5c079bca3bbf376e08351fbc45f4c" exitCode=0 Nov 29 04:14:49 crc kubenswrapper[4631]: I1129 04:14:49.594881 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xt8sx" event={"ID":"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e","Type":"ContainerDied","Data":"12c56536a8fe481ae17332bebb1c3667e2f5c079bca3bbf376e08351fbc45f4c"} Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.148601 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.148683 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.194651 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.251634 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-swnf5"] Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.347481 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.347531 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.396614 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.640254 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.645177 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.716512 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.716585 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.716629 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.717245 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.717378 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557" gracePeriod=600 Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.762277 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.762532 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:14:50 crc kubenswrapper[4631]: I1129 04:14:50.793470 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:14:51 crc kubenswrapper[4631]: I1129 04:14:51.606184 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557" exitCode=0 Nov 29 04:14:51 crc kubenswrapper[4631]: I1129 04:14:51.606372 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557"} Nov 29 04:14:51 crc kubenswrapper[4631]: I1129 04:14:51.643229 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:14:53 crc kubenswrapper[4631]: I1129 04:14:53.735928 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:14:53 crc kubenswrapper[4631]: I1129 04:14:53.736017 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:14:53 crc kubenswrapper[4631]: I1129 04:14:53.793550 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:14:54 crc kubenswrapper[4631]: I1129 04:14:54.250190 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4lx6m"] Nov 29 04:14:54 crc kubenswrapper[4631]: I1129 04:14:54.250555 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4lx6m" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerName="registry-server" containerID="cri-o://d4f50cc1ae3ca02dad4e341c1f12901ae8e87ea250b5cd9ab3a6aa38661af4ec" gracePeriod=2 Nov 29 04:14:54 crc kubenswrapper[4631]: I1129 04:14:54.691600 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:14:55 crc kubenswrapper[4631]: E1129 04:14:55.470753 4631 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf623e07c_73c0_4a97_a41f_cc5435e86ecb.slice/crio-d4f50cc1ae3ca02dad4e341c1f12901ae8e87ea250b5cd9ab3a6aa38661af4ec.scope\": RecentStats: unable to find data in memory cache]" Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.637854 4631 generic.go:334] "Generic (PLEG): container finished" podID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerID="d4f50cc1ae3ca02dad4e341c1f12901ae8e87ea250b5cd9ab3a6aa38661af4ec" exitCode=0 Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.637971 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lx6m" event={"ID":"f623e07c-73c0-4a97-a41f-cc5435e86ecb","Type":"ContainerDied","Data":"d4f50cc1ae3ca02dad4e341c1f12901ae8e87ea250b5cd9ab3a6aa38661af4ec"} Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.830093 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.849740 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bqhps"] Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.850007 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bqhps" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerName="registry-server" containerID="cri-o://0f551a0a0e67c364fc988aa8abdab8eaa31b3f2b353c10b5bfbc6cc9f12b8dbb" gracePeriod=2 Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.982186 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zx8q9\" (UniqueName: \"kubernetes.io/projected/f623e07c-73c0-4a97-a41f-cc5435e86ecb-kube-api-access-zx8q9\") pod \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.982282 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-catalog-content\") pod \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.982406 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-utilities\") pod \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\" (UID: \"f623e07c-73c0-4a97-a41f-cc5435e86ecb\") " Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.983270 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-utilities" (OuterVolumeSpecName: "utilities") pod "f623e07c-73c0-4a97-a41f-cc5435e86ecb" (UID: "f623e07c-73c0-4a97-a41f-cc5435e86ecb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:14:56 crc kubenswrapper[4631]: I1129 04:14:56.989452 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f623e07c-73c0-4a97-a41f-cc5435e86ecb-kube-api-access-zx8q9" (OuterVolumeSpecName: "kube-api-access-zx8q9") pod "f623e07c-73c0-4a97-a41f-cc5435e86ecb" (UID: "f623e07c-73c0-4a97-a41f-cc5435e86ecb"). InnerVolumeSpecName "kube-api-access-zx8q9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.082389 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f623e07c-73c0-4a97-a41f-cc5435e86ecb" (UID: "f623e07c-73c0-4a97-a41f-cc5435e86ecb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.083925 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.083950 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zx8q9\" (UniqueName: \"kubernetes.io/projected/f623e07c-73c0-4a97-a41f-cc5435e86ecb-kube-api-access-zx8q9\") on node \"crc\" DevicePath \"\"" Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.083962 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f623e07c-73c0-4a97-a41f-cc5435e86ecb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.649072 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"5d3a08db8fc9bdeb8d65723a4db4ac13f3423befc007a14c169afc04a3465ddc"} Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.652198 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lx6m" event={"ID":"f623e07c-73c0-4a97-a41f-cc5435e86ecb","Type":"ContainerDied","Data":"638355a7aa808abc84a33c87bfbf289be3562c0a7278315e38f7539dfed2e6d8"} Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.652236 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4lx6m" Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.652288 4631 scope.go:117] "RemoveContainer" containerID="d4f50cc1ae3ca02dad4e341c1f12901ae8e87ea250b5cd9ab3a6aa38661af4ec" Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.682887 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4lx6m"] Nov 29 04:14:57 crc kubenswrapper[4631]: I1129 04:14:57.688206 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4lx6m"] Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.179305 4631 scope.go:117] "RemoveContainer" containerID="65ab546a2b2db7c3ae8341577b3d48086f64ae1f4a494dcc24e3dcd0c315fdba" Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.302142 4631 scope.go:117] "RemoveContainer" containerID="77cca9dca8de93790b3c774c5e28eef672ad3748b02c051d00199896c5340a42" Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.659310 4631 generic.go:334] "Generic (PLEG): container finished" podID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerID="0f551a0a0e67c364fc988aa8abdab8eaa31b3f2b353c10b5bfbc6cc9f12b8dbb" exitCode=0 Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.659619 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqhps" event={"ID":"51cb5ea4-3021-451c-b87e-b6f44de274c9","Type":"ContainerDied","Data":"0f551a0a0e67c364fc988aa8abdab8eaa31b3f2b353c10b5bfbc6cc9f12b8dbb"} Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.661256 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dq9ls" event={"ID":"8dc666e0-e138-4b85-9ecc-d6af453cdc05","Type":"ContainerStarted","Data":"70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c"} Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 
04:14:58.663375 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xt8sx" event={"ID":"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e","Type":"ContainerStarted","Data":"4c572e702fa791de6877570d94ee7631cb56d6efebd7847fed65434ae98b98f6"} Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.666211 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7gs9" event={"ID":"57bcc661-05dc-4e34-9b78-ecc0d6f5f881","Type":"ContainerStarted","Data":"57a53a8619e1ff951fa7a6fa89e0f75cb4e0255a49935a1bab5c6105e9ccafc4"} Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.714180 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-b7gs9" podStartSLOduration=4.7416121140000005 podStartE2EDuration="1m8.714165082s" podCreationTimestamp="2025-11-29 04:13:50 +0000 UTC" firstStartedPulling="2025-11-29 04:13:52.044015443 +0000 UTC m=+159.108518957" lastFinishedPulling="2025-11-29 04:14:56.016568371 +0000 UTC m=+223.081071925" observedRunningTime="2025-11-29 04:14:58.712767857 +0000 UTC m=+225.777271371" watchObservedRunningTime="2025-11-29 04:14:58.714165082 +0000 UTC m=+225.778668596" Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.732161 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xt8sx" podStartSLOduration=2.560507677 podStartE2EDuration="1m6.732143429s" podCreationTimestamp="2025-11-29 04:13:52 +0000 UTC" firstStartedPulling="2025-11-29 04:13:54.142952259 +0000 UTC m=+161.207455773" lastFinishedPulling="2025-11-29 04:14:58.314587971 +0000 UTC m=+225.379091525" observedRunningTime="2025-11-29 04:14:58.730852587 +0000 UTC m=+225.795356101" watchObservedRunningTime="2025-11-29 04:14:58.732143429 +0000 UTC m=+225.796646943" Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.878656 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.914002 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-catalog-content\") pod \"51cb5ea4-3021-451c-b87e-b6f44de274c9\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.914048 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-utilities\") pod \"51cb5ea4-3021-451c-b87e-b6f44de274c9\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.914083 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wfsq\" (UniqueName: \"kubernetes.io/projected/51cb5ea4-3021-451c-b87e-b6f44de274c9-kube-api-access-5wfsq\") pod \"51cb5ea4-3021-451c-b87e-b6f44de274c9\" (UID: \"51cb5ea4-3021-451c-b87e-b6f44de274c9\") " Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.914917 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-utilities" (OuterVolumeSpecName: "utilities") pod "51cb5ea4-3021-451c-b87e-b6f44de274c9" (UID: "51cb5ea4-3021-451c-b87e-b6f44de274c9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:14:58 crc kubenswrapper[4631]: I1129 04:14:58.930666 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51cb5ea4-3021-451c-b87e-b6f44de274c9-kube-api-access-5wfsq" (OuterVolumeSpecName: "kube-api-access-5wfsq") pod "51cb5ea4-3021-451c-b87e-b6f44de274c9" (UID: "51cb5ea4-3021-451c-b87e-b6f44de274c9"). InnerVolumeSpecName "kube-api-access-5wfsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.015198 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.015460 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wfsq\" (UniqueName: \"kubernetes.io/projected/51cb5ea4-3021-451c-b87e-b6f44de274c9-kube-api-access-5wfsq\") on node \"crc\" DevicePath \"\"" Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.047954 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "51cb5ea4-3021-451c-b87e-b6f44de274c9" (UID: "51cb5ea4-3021-451c-b87e-b6f44de274c9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.115797 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cb5ea4-3021-451c-b87e-b6f44de274c9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.222001 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" path="/var/lib/kubelet/pods/f623e07c-73c0-4a97-a41f-cc5435e86ecb/volumes" Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.672259 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bqhps" event={"ID":"51cb5ea4-3021-451c-b87e-b6f44de274c9","Type":"ContainerDied","Data":"d14262909cc9d6b4d191d69c13b5557bd1b2c22577e5e16e8d126a7de5d0468e"} Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.672313 4631 scope.go:117] "RemoveContainer" containerID="0f551a0a0e67c364fc988aa8abdab8eaa31b3f2b353c10b5bfbc6cc9f12b8dbb" Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.672489 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bqhps" Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.675664 4631 generic.go:334] "Generic (PLEG): container finished" podID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerID="70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c" exitCode=0 Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.675693 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dq9ls" event={"ID":"8dc666e0-e138-4b85-9ecc-d6af453cdc05","Type":"ContainerDied","Data":"70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c"} Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.694133 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bqhps"] Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.696982 4631 scope.go:117] "RemoveContainer" containerID="c1ff0678e8198a7a1767f5d7b820e368a5ec451578badaa7a4eaee1d374cf636" Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.701375 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bqhps"] Nov 29 04:14:59 crc kubenswrapper[4631]: I1129 04:14:59.711131 4631 scope.go:117] "RemoveContainer" containerID="c826cc3f4f8184a01a3be93ddc877db817b009227385197bc1e7c5a7b5f72bf2" Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.145639 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"] Nov 29 04:15:00 crc kubenswrapper[4631]: E1129 04:15:00.145847 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerName="registry-server" Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.145861 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerName="registry-server" Nov 29 04:15:00 crc kubenswrapper[4631]: E1129 04:15:00.145874 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerName="extract-utilities" Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.145882 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerName="extract-utilities" Nov 29 04:15:00 crc kubenswrapper[4631]: E1129 04:15:00.145894 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb12c1e3-1750-4c01-a95b-5009584a0a88" containerName="pruner" Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.145902 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb12c1e3-1750-4c01-a95b-5009584a0a88" containerName="pruner" Nov 29 04:15:00 crc kubenswrapper[4631]: E1129 04:15:00.145911 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerName="extract-utilities" Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.145919 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerName="extract-utilities" Nov 29 04:15:00 crc kubenswrapper[4631]: E1129 04:15:00.145929 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerName="extract-content" Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.145937 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerName="extract-content" Nov 29 04:15:00 crc kubenswrapper[4631]: 
E1129 04:15:00.145950 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerName="extract-content"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.145961 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerName="extract-content"
Nov 29 04:15:00 crc kubenswrapper[4631]: E1129 04:15:00.145982 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerName="registry-server"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.145990 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerName="registry-server"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.146100 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f623e07c-73c0-4a97-a41f-cc5435e86ecb" containerName="registry-server"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.146116 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" containerName="registry-server"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.146132 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb12c1e3-1750-4c01-a95b-5009584a0a88" containerName="pruner"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.146568 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.152153 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.152449 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.170851 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"]
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.234375 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5d2v\" (UniqueName: \"kubernetes.io/projected/083f2d0e-7023-4525-bf09-65b19f3b60b0-kube-api-access-f5d2v\") pod \"collect-profiles-29406495-58zvs\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.234738 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/083f2d0e-7023-4525-bf09-65b19f3b60b0-secret-volume\") pod \"collect-profiles-29406495-58zvs\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.234779 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/083f2d0e-7023-4525-bf09-65b19f3b60b0-config-volume\") pod \"collect-profiles-29406495-58zvs\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.336719 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5d2v\" (UniqueName: \"kubernetes.io/projected/083f2d0e-7023-4525-bf09-65b19f3b60b0-kube-api-access-f5d2v\") pod \"collect-profiles-29406495-58zvs\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.336870 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/083f2d0e-7023-4525-bf09-65b19f3b60b0-secret-volume\") pod \"collect-profiles-29406495-58zvs\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.336939 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/083f2d0e-7023-4525-bf09-65b19f3b60b0-config-volume\") pod \"collect-profiles-29406495-58zvs\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.338511 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/083f2d0e-7023-4525-bf09-65b19f3b60b0-config-volume\") pod \"collect-profiles-29406495-58zvs\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.358282 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5d2v\" (UniqueName: \"kubernetes.io/projected/083f2d0e-7023-4525-bf09-65b19f3b60b0-kube-api-access-f5d2v\") pod \"collect-profiles-29406495-58zvs\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.362107 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/083f2d0e-7023-4525-bf09-65b19f3b60b0-secret-volume\") pod \"collect-profiles-29406495-58zvs\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.534037 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.546202 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-b7gs9"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.546248 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-b7gs9"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.604955 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-b7gs9"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.690421 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dq9ls" event={"ID":"8dc666e0-e138-4b85-9ecc-d6af453cdc05","Type":"ContainerStarted","Data":"6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e"}
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.711315 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dq9ls" podStartSLOduration=2.745706336 podStartE2EDuration="1m7.711293516s" podCreationTimestamp="2025-11-29 04:13:53 +0000 UTC" firstStartedPulling="2025-11-29 04:13:55.165553293 +0000 UTC m=+162.230056807" lastFinishedPulling="2025-11-29 04:15:00.131140473 +0000 UTC m=+227.195643987" observedRunningTime="2025-11-29 04:15:00.708203939 +0000 UTC m=+227.772707453" watchObservedRunningTime="2025-11-29 04:15:00.711293516 +0000 UTC m=+227.775797030"
Nov 29 04:15:00 crc kubenswrapper[4631]: I1129 04:15:00.976724 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"]
Nov 29 04:15:00 crc kubenswrapper[4631]: W1129 04:15:00.983408 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod083f2d0e_7023_4525_bf09_65b19f3b60b0.slice/crio-cd04bfab3316b46eaf71d5572095093275384ef137c2532f6785c0f52090081d WatchSource:0}: Error finding container cd04bfab3316b46eaf71d5572095093275384ef137c2532f6785c0f52090081d: Status 404 returned error can't find the container with id cd04bfab3316b46eaf71d5572095093275384ef137c2532f6785c0f52090081d
Nov 29 04:15:01 crc kubenswrapper[4631]: I1129 04:15:01.227031 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51cb5ea4-3021-451c-b87e-b6f44de274c9" path="/var/lib/kubelet/pods/51cb5ea4-3021-451c-b87e-b6f44de274c9/volumes"
Nov 29 04:15:01 crc kubenswrapper[4631]: I1129 04:15:01.706536 4631 generic.go:334] "Generic (PLEG): container finished" podID="083f2d0e-7023-4525-bf09-65b19f3b60b0" containerID="04a16341ab580bcac91e381b4a7fd05a101cc56597e8195930e2b375c6eba1f1" exitCode=0
Nov 29 04:15:01 crc kubenswrapper[4631]: I1129 04:15:01.706606 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs" event={"ID":"083f2d0e-7023-4525-bf09-65b19f3b60b0","Type":"ContainerDied","Data":"04a16341ab580bcac91e381b4a7fd05a101cc56597e8195930e2b375c6eba1f1"}
Nov 29 04:15:01 crc kubenswrapper[4631]: I1129 04:15:01.706645 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs" event={"ID":"083f2d0e-7023-4525-bf09-65b19f3b60b0","Type":"ContainerStarted","Data":"cd04bfab3316b46eaf71d5572095093275384ef137c2532f6785c0f52090081d"}
Nov 29 04:15:02 crc kubenswrapper[4631]: I1129 04:15:02.534538 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xt8sx"
Nov 29 04:15:02 crc kubenswrapper[4631]: I1129 04:15:02.534630 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xt8sx"
Nov 29 04:15:02 crc kubenswrapper[4631]: I1129 04:15:02.606451 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xt8sx"
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.049509 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.076498 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5d2v\" (UniqueName: \"kubernetes.io/projected/083f2d0e-7023-4525-bf09-65b19f3b60b0-kube-api-access-f5d2v\") pod \"083f2d0e-7023-4525-bf09-65b19f3b60b0\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") "
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.076550 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/083f2d0e-7023-4525-bf09-65b19f3b60b0-secret-volume\") pod \"083f2d0e-7023-4525-bf09-65b19f3b60b0\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") "
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.076665 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/083f2d0e-7023-4525-bf09-65b19f3b60b0-config-volume\") pod \"083f2d0e-7023-4525-bf09-65b19f3b60b0\" (UID: \"083f2d0e-7023-4525-bf09-65b19f3b60b0\") "
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.077489 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/083f2d0e-7023-4525-bf09-65b19f3b60b0-config-volume" (OuterVolumeSpecName: "config-volume") pod "083f2d0e-7023-4525-bf09-65b19f3b60b0" (UID: "083f2d0e-7023-4525-bf09-65b19f3b60b0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.083547 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/083f2d0e-7023-4525-bf09-65b19f3b60b0-kube-api-access-f5d2v" (OuterVolumeSpecName: "kube-api-access-f5d2v") pod "083f2d0e-7023-4525-bf09-65b19f3b60b0" (UID: "083f2d0e-7023-4525-bf09-65b19f3b60b0"). InnerVolumeSpecName "kube-api-access-f5d2v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.085763 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/083f2d0e-7023-4525-bf09-65b19f3b60b0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "083f2d0e-7023-4525-bf09-65b19f3b60b0" (UID: "083f2d0e-7023-4525-bf09-65b19f3b60b0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.177872 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5d2v\" (UniqueName: \"kubernetes.io/projected/083f2d0e-7023-4525-bf09-65b19f3b60b0-kube-api-access-f5d2v\") on node \"crc\" DevicePath \"\""
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.177926 4631 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/083f2d0e-7023-4525-bf09-65b19f3b60b0-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.177939 4631 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/083f2d0e-7023-4525-bf09-65b19f3b60b0-config-volume\") on node \"crc\" DevicePath \"\""
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.367185 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dq9ls"
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.367267 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dq9ls"
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.720824 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs" event={"ID":"083f2d0e-7023-4525-bf09-65b19f3b60b0","Type":"ContainerDied","Data":"cd04bfab3316b46eaf71d5572095093275384ef137c2532f6785c0f52090081d"}
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.720866 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"
Nov 29 04:15:03 crc kubenswrapper[4631]: I1129 04:15:03.720894 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd04bfab3316b46eaf71d5572095093275384ef137c2532f6785c0f52090081d"
Nov 29 04:15:04 crc kubenswrapper[4631]: I1129 04:15:04.439932 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dq9ls" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerName="registry-server" probeResult="failure" output=<
Nov 29 04:15:04 crc kubenswrapper[4631]: timeout: failed to connect service ":50051" within 1s
Nov 29 04:15:04 crc kubenswrapper[4631]: >
Nov 29 04:15:10 crc kubenswrapper[4631]: I1129 04:15:10.587937 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-b7gs9"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.649402 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b7gs9"]
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.650039 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-b7gs9" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerName="registry-server" containerID="cri-o://57a53a8619e1ff951fa7a6fa89e0f75cb4e0255a49935a1bab5c6105e9ccafc4" gracePeriod=2
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.850153 4631 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 29 04:15:11 crc kubenswrapper[4631]: E1129 04:15:11.850513 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="083f2d0e-7023-4525-bf09-65b19f3b60b0" containerName="collect-profiles"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.850541 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="083f2d0e-7023-4525-bf09-65b19f3b60b0" containerName="collect-profiles"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.850747 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="083f2d0e-7023-4525-bf09-65b19f3b60b0" containerName="collect-profiles"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.851277 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.852858 4631 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.853825 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2" gracePeriod=15
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.853965 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe" gracePeriod=15
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.853995 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644" gracePeriod=15
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.854070 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828" gracePeriod=15
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.854129 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18" gracePeriod=15
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.854988 4631 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 29 04:15:11 crc kubenswrapper[4631]: E1129 04:15:11.855359 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855379 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 29 04:15:11 crc kubenswrapper[4631]: E1129 04:15:11.855392 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855400 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 29 04:15:11 crc kubenswrapper[4631]: E1129 04:15:11.855438 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855448 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 29 04:15:11 crc kubenswrapper[4631]: E1129 04:15:11.855458 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855465 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 29 04:15:11 crc kubenswrapper[4631]: E1129 04:15:11.855485 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855492 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 29 04:15:11 crc kubenswrapper[4631]: E1129 04:15:11.855529 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855537 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 29 04:15:11 crc kubenswrapper[4631]: E1129 04:15:11.855545 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855553 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855714 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855729 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855766 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855777 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855788 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.855800 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.899106 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.899279 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.899408 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.899519 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.899643 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.899738 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.899833 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.899932 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:11 crc kubenswrapper[4631]: I1129 04:15:11.899834 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.001615 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.001712 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.001801 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.001844 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.001880 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.001916 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.001960 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.002007 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.002121 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.002187 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.002232 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.002271 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.002312 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.002418 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.002505 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.002547 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.194764 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 29 04:15:12 crc kubenswrapper[4631]: E1129 04:15:12.536647 4631 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.190:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c5f186d11142c openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Created,Message:Created container startup-monitor,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-29 04:15:12.536081452 +0000 UTC m=+239.600584966,LastTimestamp:2025-11-29 04:15:12.536081452 +0000 UTC m=+239.600584966,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.581443 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xt8sx"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.581965 4631 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.582381 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.582728 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.780397 4631 generic.go:334] "Generic (PLEG): container finished" podID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerID="57a53a8619e1ff951fa7a6fa89e0f75cb4e0255a49935a1bab5c6105e9ccafc4" exitCode=0
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.780497 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7gs9" event={"ID":"57bcc661-05dc-4e34-9b78-ecc0d6f5f881","Type":"ContainerDied","Data":"57a53a8619e1ff951fa7a6fa89e0f75cb4e0255a49935a1bab5c6105e9ccafc4"}
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.785068 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7"}
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.785179 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"8225915df212bfc41c86de620e099816dc36d6debcbfccbeb24615af94a79888"}
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.786083 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.786739 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.787395 4631 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.788978 4631 generic.go:334] "Generic (PLEG): container finished" podID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" containerID="ca8d9c71f805c2cb718dcd6938ead07bfe13ba191bb9e9de0b692a9d9b5e8ef5" exitCode=0
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.789081 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"f11b902c-2b11-4bb5-8cdd-67c739b6f90f","Type":"ContainerDied","Data":"ca8d9c71f805c2cb718dcd6938ead07bfe13ba191bb9e9de0b692a9d9b5e8ef5"}
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.789857 4631 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.790276 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.790840 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.791441 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.795861 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.797941 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.799547 4631 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe" exitCode=0
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.799582 4631 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644" exitCode=0
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.799602 4631 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828" exitCode=0
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.799626 4631 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18" exitCode=2
Nov 29 04:15:12 crc kubenswrapper[4631]: I1129 04:15:12.799684 4631 scope.go:117] "RemoveContainer" containerID="8ebb1bdc0bfa76c2bb9dd377c378462bbae66377400220cbf482bdf25ecabf83"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.225914 4631 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.226965 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.228544 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.229080 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.277919 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b7gs9"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.278563 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.279264 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.279631 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.280140 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.327995 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-utilities\") pod \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") "
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.328063 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-catalog-content\") pod \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") "
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.328098 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mc89d\" (UniqueName: \"kubernetes.io/projected/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-kube-api-access-mc89d\") pod \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\" (UID: \"57bcc661-05dc-4e34-9b78-ecc0d6f5f881\") "
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.332871 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-utilities" (OuterVolumeSpecName: "utilities") pod "57bcc661-05dc-4e34-9b78-ecc0d6f5f881" (UID: "57bcc661-05dc-4e34-9b78-ecc0d6f5f881"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.334562 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-kube-api-access-mc89d" (OuterVolumeSpecName: "kube-api-access-mc89d") pod "57bcc661-05dc-4e34-9b78-ecc0d6f5f881" (UID: "57bcc661-05dc-4e34-9b78-ecc0d6f5f881"). InnerVolumeSpecName "kube-api-access-mc89d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.399859 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57bcc661-05dc-4e34-9b78-ecc0d6f5f881" (UID: "57bcc661-05dc-4e34-9b78-ecc0d6f5f881"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.422802 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dq9ls"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.424252 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.424605 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.424795 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.424982 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.425224 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.429811 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.429844 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.429863 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mc89d\" (UniqueName: \"kubernetes.io/projected/57bcc661-05dc-4e34-9b78-ecc0d6f5f881-kube-api-access-mc89d\") on node \"crc\" DevicePath \"\""
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.472575 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dq9ls"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.473091 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.473449 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.473964 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.474311 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.474732 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.810524 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.815459 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b7gs9" event={"ID":"57bcc661-05dc-4e34-9b78-ecc0d6f5f881","Type":"ContainerDied","Data":"4dd058c919307be3c0df0752535c68ca940a436c0fd05a3e457db06ffe6bac22"}
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.815538 4631 scope.go:117] "RemoveContainer" containerID="57a53a8619e1ff951fa7a6fa89e0f75cb4e0255a49935a1bab5c6105e9ccafc4"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.816294 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b7gs9"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.817390 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.817995 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.818380 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.818732 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.819083 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.844292 4631 scope.go:117] "RemoveContainer" containerID="e799c40b263589da94b46a5be7095abf56914ec4c374f985d26d3cff470bd8f7"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.858688 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.859856 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.860139 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.860652 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.861078 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:13 crc kubenswrapper[4631]: I1129 04:15:13.873918 4631 scope.go:117] "RemoveContainer" containerID="0ed539f005bbc90867cc0f97f445d10ae1a43040e24402b67702384add671884"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.161135 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.161894 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.162357 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.162816 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.163151 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.163473 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.241280 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-var-lock\") pod \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") "
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.241381 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kube-api-access\") pod \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") "
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.241426 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kubelet-dir\") pod \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\" (UID: \"f11b902c-2b11-4bb5-8cdd-67c739b6f90f\") "
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.241530 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-var-lock" (OuterVolumeSpecName: "var-lock") pod "f11b902c-2b11-4bb5-8cdd-67c739b6f90f" (UID: "f11b902c-2b11-4bb5-8cdd-67c739b6f90f"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.241730 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "f11b902c-2b11-4bb5-8cdd-67c739b6f90f" (UID: "f11b902c-2b11-4bb5-8cdd-67c739b6f90f"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.241820 4631 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-var-lock\") on node \"crc\" DevicePath \"\""
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.245488 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "f11b902c-2b11-4bb5-8cdd-67c739b6f90f" (UID: "f11b902c-2b11-4bb5-8cdd-67c739b6f90f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.351379 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.351409 4631 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f11b902c-2b11-4bb5-8cdd-67c739b6f90f-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.823711 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"f11b902c-2b11-4bb5-8cdd-67c739b6f90f","Type":"ContainerDied","Data":"ff0469184df3f855563ef23165fa0d0f5abd3bb56a8526973e0dfbb675207006"}
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.824091 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff0469184df3f855563ef23165fa0d0f5abd3bb56a8526973e0dfbb675207006"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.824166 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.920203 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.920930 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.921532 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.921885 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:14 crc kubenswrapper[4631]: I1129 04:15:14.922374 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.164513 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.165728 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.166228 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.166714 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.167091 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.167616 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.168421 4631 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.168970 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused"
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.262355 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.262416 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir".
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.262512 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.262575 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.262590 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.262674 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.263032 4631 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.263055 4631 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.263075 4631 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.279318 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" containerName="oauth-openshift" containerID="cri-o://42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2" gracePeriod=15 Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.828390 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.829516 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.829849 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.830161 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.830519 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.831767 4631 status_manager.go:851] "Failed to get status for pod" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-swnf5\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.832011 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.833775 4631 generic.go:334] "Generic (PLEG): container finished" podID="9ed87add-9fae-43f8-acf1-e8b425d9afee" containerID="42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2" exitCode=0 Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.833825 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.833848 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" event={"ID":"9ed87add-9fae-43f8-acf1-e8b425d9afee","Type":"ContainerDied","Data":"42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2"} Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.834113 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" event={"ID":"9ed87add-9fae-43f8-acf1-e8b425d9afee","Type":"ContainerDied","Data":"24127abc84f50b0e8edb2c94aa58ec4d0508680cabbf4cb732d0a853ddde988b"} Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.834132 4631 scope.go:117] "RemoveContainer" containerID="42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.834689 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.834980 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.835440 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.835686 4631 status_manager.go:851] "Failed to get status for pod" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-swnf5\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.843978 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.844257 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.846819 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.848801 4631 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2" exitCode=0 Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.848906 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.850732 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.851056 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.852136 4631 status_manager.go:851] "Failed to get status for pod" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-swnf5\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.852527 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.853308 4631 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.853683 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.853955 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.863070 4631 scope.go:117] "RemoveContainer" containerID="42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2" Nov 29 
04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.863538 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.863981 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: E1129 04:15:15.863558 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2\": container with ID starting with 42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2 not found: ID does not exist" containerID="42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.864116 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2"} err="failed to get container status \"42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2\": rpc error: code = NotFound desc = could not find container \"42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2\": container with ID starting with 42a7c5d1881b6eb2ce8be3a23b5e169370bc7941ddfb5c2791ff4336d51a6be2 not found: ID does not exist" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.864145 4631 scope.go:117] "RemoveContainer" containerID="889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.864538 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.864925 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.865998 4631 status_manager.go:851] "Failed to get status for pod" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-swnf5\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.866378 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.867318 4631 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.867999 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-ocp-branding-template\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.868163 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-error\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.868339 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-trusted-ca-bundle\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.868439 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-dir\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.868513 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-router-certs\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.869416 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-cliconfig\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.869518 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-idp-0-file-data\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.869598 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-service-ca\") pod 
\"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.869743 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-serving-cert\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.869841 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-session\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.869914 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-policies\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.869983 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-login\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.870071 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-provider-selection\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.870151 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sm7hd\" (UniqueName: \"kubernetes.io/projected/9ed87add-9fae-43f8-acf1-e8b425d9afee-kube-api-access-sm7hd\") pod \"9ed87add-9fae-43f8-acf1-e8b425d9afee\" (UID: \"9ed87add-9fae-43f8-acf1-e8b425d9afee\") " Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.868601 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.869305 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.869835 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.870127 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.872054 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.874709 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ed87add-9fae-43f8-acf1-e8b425d9afee-kube-api-access-sm7hd" (OuterVolumeSpecName: "kube-api-access-sm7hd") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "kube-api-access-sm7hd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.874853 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.875734 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.875824 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.876242 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.878518 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.880501 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.882407 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.883745 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "9ed87add-9fae-43f8-acf1-e8b425d9afee" (UID: "9ed87add-9fae-43f8-acf1-e8b425d9afee"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.885008 4631 scope.go:117] "RemoveContainer" containerID="97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.918651 4631 scope.go:117] "RemoveContainer" containerID="5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.937302 4631 scope.go:117] "RemoveContainer" containerID="94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.957078 4631 scope.go:117] "RemoveContainer" containerID="e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.971695 4631 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.971878 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.972123 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.972826 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sm7hd\" (UniqueName: \"kubernetes.io/projected/9ed87add-9fae-43f8-acf1-e8b425d9afee-kube-api-access-sm7hd\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.972996 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.973113 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.973218 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.973373 4631 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9ed87add-9fae-43f8-acf1-e8b425d9afee-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.973643 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.973794 4631 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.973907 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.974001 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.974081 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.974165 4631 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9ed87add-9fae-43f8-acf1-e8b425d9afee-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 29 04:15:15 crc kubenswrapper[4631]: I1129 04:15:15.976604 4631 scope.go:117] "RemoveContainer" containerID="8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.009011 4631 scope.go:117] "RemoveContainer" containerID="889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.009854 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\": container with ID starting with 889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe not found: ID does not exist" containerID="889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.009977 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe"} err="failed to get container status \"889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\": rpc error: code = NotFound desc = could not find container \"889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe\": container with ID starting with 889077e72a6c7ddee482baa4b61a3409670fa5a770e4e811bb34ae27c38172fe not found: ID does not exist" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.010109 4631 scope.go:117] "RemoveContainer" containerID="97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.010617 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\": container with ID starting with 97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644 not found: ID does not exist" containerID="97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.010648 4631 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644"} err="failed to get container status \"97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\": rpc error: code = NotFound desc = could not find container \"97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644\": container with ID starting with 97959c09cb7c4a33e01a09ad54a02514c82a33cd84fc926a3d11c9b5282fc644 not found: ID does not exist" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.010669 4631 scope.go:117] "RemoveContainer" containerID="5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.010845 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\": container with ID starting with 5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828 not found: ID does not exist" containerID="5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.010863 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828"} err="failed to get container status \"5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\": rpc error: code = NotFound desc = could not find container \"5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828\": container with ID starting with 5eaa2bd95adfbb6ca47b74b361d9b3feedc8560a48493b60093ce368e1cb0828 not found: ID does not exist" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.010876 4631 scope.go:117] "RemoveContainer" containerID="94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.011066 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\": container with ID starting with 94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18 not found: ID does not exist" containerID="94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.011090 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18"} err="failed to get container status \"94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\": rpc error: code = NotFound desc = could not find container \"94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18\": container with ID starting with 94d61dbdd3be1d8d7fa3161646cca355a0a1c1097c9f8726ed8e479a75638e18 not found: ID does not exist" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.011101 4631 scope.go:117] "RemoveContainer" containerID="e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.011412 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\": container with ID starting with e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2 not found: ID does not exist" 
containerID="e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.011431 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2"} err="failed to get container status \"e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\": rpc error: code = NotFound desc = could not find container \"e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2\": container with ID starting with e32cbd94979230f9580cf4899ada6a4e40cf4f20a949903074ce1689651754c2 not found: ID does not exist" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.011442 4631 scope.go:117] "RemoveContainer" containerID="8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.011698 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\": container with ID starting with 8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4 not found: ID does not exist" containerID="8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.011715 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4"} err="failed to get container status \"8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\": rpc error: code = NotFound desc = could not find container \"8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4\": container with ID starting with 8716433ddbd745c6cd68088ed114e41d6dc698368a5b6efebd0943b813d495d4 not found: ID does not exist" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.149945 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.150364 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.150866 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.151111 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 
04:15:16.151323 4631 status_manager.go:851] "Failed to get status for pod" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-swnf5\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.151574 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.151793 4631 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.174431 4631 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.174870 4631 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.175452 4631 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.175841 4631 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.176317 4631 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:16 crc kubenswrapper[4631]: I1129 04:15:16.176406 4631 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.176879 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="200ms" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.378108 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="400ms" Nov 29 04:15:16 crc kubenswrapper[4631]: E1129 04:15:16.780375 4631 
controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="800ms" Nov 29 04:15:17 crc kubenswrapper[4631]: I1129 04:15:17.227503 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 29 04:15:17 crc kubenswrapper[4631]: E1129 04:15:17.581426 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="1.6s" Nov 29 04:15:19 crc kubenswrapper[4631]: E1129 04:15:19.182958 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="3.2s" Nov 29 04:15:19 crc kubenswrapper[4631]: E1129 04:15:19.361545 4631 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.190:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c5f186d11142c openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Created,Message:Created container startup-monitor,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-29 04:15:12.536081452 +0000 UTC m=+239.600584966,LastTimestamp:2025-11-29 04:15:12.536081452 +0000 UTC m=+239.600584966,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 29 04:15:20 crc kubenswrapper[4631]: E1129 04:15:20.305025 4631 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.190:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" volumeName="registry-storage" Nov 29 04:15:22 crc kubenswrapper[4631]: E1129 04:15:22.384456 4631 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.190:6443: connect: connection refused" interval="6.4s" Nov 29 04:15:23 crc kubenswrapper[4631]: I1129 04:15:23.225673 4631 status_manager.go:851] "Failed to get status for pod" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-swnf5\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:23 crc kubenswrapper[4631]: 
I1129 04:15:23.226422 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:23 crc kubenswrapper[4631]: I1129 04:15:23.226960 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:23 crc kubenswrapper[4631]: I1129 04:15:23.227662 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:23 crc kubenswrapper[4631]: I1129 04:15:23.228256 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:23 crc kubenswrapper[4631]: I1129 04:15:23.228957 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.216541 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.218844 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.219627 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.224529 4631 status_manager.go:851] "Failed to get status for pod" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-swnf5\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.224991 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.225623 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.226111 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.241651 4631 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.241708 4631 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:25 crc kubenswrapper[4631]: E1129 04:15:25.242506 4631 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.243220 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:25 crc kubenswrapper[4631]: I1129 04:15:25.938801 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"9921be70fd014ebc4f89c3646fa6946e48ed3842267476c9244a0df53f739ffc"} Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.516951 4631 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.517035 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.950322 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.950448 4631 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5" exitCode=1 Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.950540 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5"} Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.951391 4631 scope.go:117] "RemoveContainer" containerID="f6a9b29e3923b3b2c922655ecf3b848005316be99651e413c3998ca0f1958aa5" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.951691 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.952151 4631 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.952932 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.953291 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.953431 4631 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="d8e8714c3c92aa937bbef059b816ae709e9a696af31cbbad26694788a66c0558" exitCode=0 Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.953484 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"d8e8714c3c92aa937bbef059b816ae709e9a696af31cbbad26694788a66c0558"} Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.953853 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.953957 4631 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.954291 4631 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.954630 4631 status_manager.go:851] "Failed to get status for pod" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-swnf5\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.954863 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: E1129 04:15:26.955223 4631 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.955686 4631 status_manager.go:851] "Failed to get status for pod" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" pod="openshift-marketplace/redhat-marketplace-xt8sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-xt8sx\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.956056 4631 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 
38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.956445 4631 status_manager.go:851] "Failed to get status for pod" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" pod="openshift-marketplace/redhat-operators-dq9ls" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dq9ls\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.956962 4631 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.957274 4631 status_manager.go:851] "Failed to get status for pod" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.957673 4631 status_manager.go:851] "Failed to get status for pod" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" pod="openshift-authentication/oauth-openshift-558db77b4-swnf5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-swnf5\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:26 crc kubenswrapper[4631]: I1129 04:15:26.958152 4631 status_manager.go:851] "Failed to get status for pod" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" pod="openshift-marketplace/certified-operators-b7gs9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-b7gs9\": dial tcp 38.102.83.190:6443: connect: connection refused" Nov 29 04:15:27 crc kubenswrapper[4631]: I1129 04:15:27.970627 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 29 04:15:27 crc kubenswrapper[4631]: I1129 04:15:27.971073 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"976b3b153d7667d51178dfa11b996c12425225675491fbc2ab17d75faba24e1c"} Nov 29 04:15:27 crc kubenswrapper[4631]: I1129 04:15:27.976426 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c8fd24b77dd2971c94367d2df895249fb904eb6183b62c6cf74a9630566269aa"} Nov 29 04:15:27 crc kubenswrapper[4631]: I1129 04:15:27.976470 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b12407cb37b8722f9ece43a2b2f0f4f71577f66c8fb8112032704053bda2485f"} Nov 29 04:15:27 crc kubenswrapper[4631]: I1129 04:15:27.976488 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"6b811eb9fd5c594760eaeb27918f6548eb989179ed5bc3ddfbc148d948b06ef1"} Nov 29 04:15:27 crc kubenswrapper[4631]: I1129 04:15:27.976500 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"590c77ca8cf2bd0cc153f95a9c34f36af54a2cdcc93b1e210250fe2b013b53e6"} Nov 29 04:15:28 crc kubenswrapper[4631]: I1129 04:15:28.983952 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0485c9007552f19bcb7a3750459c4f2cd7baf45f6ea65715a8a71d7ab6d3180d"} Nov 29 04:15:28 crc kubenswrapper[4631]: I1129 04:15:28.984631 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:28 crc kubenswrapper[4631]: I1129 04:15:28.984787 4631 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:28 crc kubenswrapper[4631]: I1129 04:15:28.984871 4631 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:29 crc kubenswrapper[4631]: I1129 04:15:29.582871 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:15:30 crc kubenswrapper[4631]: I1129 04:15:30.244289 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:30 crc kubenswrapper[4631]: I1129 04:15:30.244878 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:30 crc kubenswrapper[4631]: I1129 04:15:30.252268 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:31 crc kubenswrapper[4631]: I1129 04:15:31.653121 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:15:31 crc kubenswrapper[4631]: I1129 04:15:31.659323 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:15:33 crc kubenswrapper[4631]: I1129 04:15:33.993808 4631 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:34 crc kubenswrapper[4631]: I1129 04:15:34.012510 4631 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:34 crc kubenswrapper[4631]: I1129 04:15:34.012536 4631 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:34 crc kubenswrapper[4631]: I1129 04:15:34.016834 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 29 04:15:34 crc kubenswrapper[4631]: I1129 04:15:34.019239 4631 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" 
oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="10f2ce0e-3fb2-49a8-866e-ad112f459336" Nov 29 04:15:35 crc kubenswrapper[4631]: I1129 04:15:35.018591 4631 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:35 crc kubenswrapper[4631]: I1129 04:15:35.018631 4631 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c" Nov 29 04:15:39 crc kubenswrapper[4631]: I1129 04:15:39.590034 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 29 04:15:43 crc kubenswrapper[4631]: I1129 04:15:43.250478 4631 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="10f2ce0e-3fb2-49a8-866e-ad112f459336" Nov 29 04:15:43 crc kubenswrapper[4631]: I1129 04:15:43.441238 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 29 04:15:43 crc kubenswrapper[4631]: I1129 04:15:43.953138 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 29 04:15:44 crc kubenswrapper[4631]: I1129 04:15:44.280131 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 29 04:15:44 crc kubenswrapper[4631]: I1129 04:15:44.328424 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 29 04:15:44 crc kubenswrapper[4631]: I1129 04:15:44.336979 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 29 04:15:44 crc kubenswrapper[4631]: I1129 04:15:44.570930 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 29 04:15:44 crc kubenswrapper[4631]: I1129 04:15:44.867410 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 29 04:15:44 crc kubenswrapper[4631]: I1129 04:15:44.985746 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.163729 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.234205 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.242797 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.348674 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.370007 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.565439 4631 reflector.go:368] Caches 
Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.604768 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.833848 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.878050 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 29 04:15:45 crc kubenswrapper[4631]: I1129 04:15:45.918060 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 29 04:15:46 crc kubenswrapper[4631]: I1129 04:15:46.053002 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 29 04:15:46 crc kubenswrapper[4631]: I1129 04:15:46.264234 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 29 04:15:46 crc kubenswrapper[4631]: I1129 04:15:46.399915 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 29 04:15:46 crc kubenswrapper[4631]: I1129 04:15:46.509028 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 29 04:15:46 crc kubenswrapper[4631]: I1129 04:15:46.569676 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 29 04:15:46 crc kubenswrapper[4631]: I1129 04:15:46.633037 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 29 04:15:46 crc kubenswrapper[4631]: I1129 04:15:46.800395 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.030458 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.099451 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.161545 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.253462 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.302662 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.316061 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.318599 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.372995 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.404210 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.459813 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.639675 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.676149 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.700416 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.741068 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 29 04:15:47 crc kubenswrapper[4631]: I1129 04:15:47.832445 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.160742 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.174647 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.179689 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.258099 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.405797 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.538045 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.571917 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.653058 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.664192 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.720205 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.834390 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.909517 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 29 04:15:48 crc kubenswrapper[4631]: I1129 04:15:48.977185 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.024528 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.052395 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.092085 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.190704 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.229178 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.258545 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.307815 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.344226 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.485816 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.529271 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.547565 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.654666 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.689874 4631 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.742439 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.748692 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.794768 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 29 04:15:49 crc kubenswrapper[4631]: I1129 04:15:49.810586 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.021829 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.109720 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.200791 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.243443 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.257318 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.260013 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.283452 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.302797 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.307748 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.408701 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.431164 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.478846 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.591214 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.703357 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.756213 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.806252 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.870613 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.924801 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.933016 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 29 04:15:50 crc kubenswrapper[4631]: I1129 04:15:50.965735 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.041979 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.095241 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.142060 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.142439 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.273791 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.416219 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.488357 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.758900 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.796697 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.842103 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.903135 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 29 04:15:51 crc kubenswrapper[4631]: I1129 04:15:51.909493 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.017403 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.023062 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.044768 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.080045 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.086145 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.124399 4631 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.199021 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.310020 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.429389 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.446433 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.514038 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.545752 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.647165 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.943234 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.946432 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 29 04:15:52 crc kubenswrapper[4631]: I1129 04:15:52.992536 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.155128 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.214899 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.239196 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.264495 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.282754 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.345025 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.351199 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.361128 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.381847 4631 reflector.go:368] Caches populated for 
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.437670 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.460125 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.524847 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.570440 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.584896 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.585430 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.590029 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.592796 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.682898 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.789056 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.817250 4631 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.818880 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.822441 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=42.822413766 podStartE2EDuration="42.822413766s" podCreationTimestamp="2025-11-29 04:15:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:15:33.90357281 +0000 UTC m=+260.968076324" watchObservedRunningTime="2025-11-29 04:15:53.822413766 +0000 UTC m=+280.886917320"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.825027 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-b7gs9","openshift-authentication/oauth-openshift-558db77b4-swnf5","openshift-kube-apiserver/kube-apiserver-crc"]
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.825109 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.825681 4631 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.825721 4631 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0973ce34-1f3e-4c8a-a7a7-5c6af50c105c"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.837206 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.848223 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=20.848205067 podStartE2EDuration="20.848205067s" podCreationTimestamp="2025-11-29 04:15:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:15:53.846742211 +0000 UTC m=+280.911245755" watchObservedRunningTime="2025-11-29 04:15:53.848205067 +0000 UTC m=+280.912708621"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.885795 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.892263 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.904288 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 29 04:15:53 crc kubenswrapper[4631]: I1129 04:15:53.955651 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.000219 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-76f84477b-6nvlx"]
Nov 29 04:15:54 crc kubenswrapper[4631]: E1129 04:15:54.000950 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerName="registry-server"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.000979 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerName="registry-server"
Nov 29 04:15:54 crc kubenswrapper[4631]: E1129 04:15:54.001013 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" containerName="installer"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.001029 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" containerName="installer"
Nov 29 04:15:54 crc kubenswrapper[4631]: E1129 04:15:54.001069 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" containerName="oauth-openshift"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.001084 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" containerName="oauth-openshift"
Nov 29 04:15:54 crc kubenswrapper[4631]: E1129 04:15:54.001101 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerName="extract-utilities"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.001114 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerName="extract-utilities"
Nov 29 04:15:54 crc kubenswrapper[4631]: E1129 04:15:54.001141 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerName="extract-content"
"RemoveStaleState: removing container" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerName="extract-content" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.001161 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerName="extract-content" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.001598 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f11b902c-2b11-4bb5-8cdd-67c739b6f90f" containerName="installer" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.001625 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" containerName="registry-server" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.001647 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" containerName="oauth-openshift" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.002698 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.019437 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.019513 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.019878 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.025618 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.026241 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.026602 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.026769 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.026780 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.027091 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.027415 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.027881 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.028081 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.030226 4631 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"kube-root-ca.crt" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.030895 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.033568 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.040421 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.044556 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.044676 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-76f84477b-6nvlx"] Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.064139 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068089 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-template-error\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068145 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068208 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-template-login\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068310 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068459 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-service-ca\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc 
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068620 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068659 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068715 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-router-certs\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068748 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-session\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068795 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-audit-dir\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068818 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzqwh\" (UniqueName: \"kubernetes.io/projected/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-kube-api-access-wzqwh\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068847 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.068934 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-audit-policies\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.074506 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.117125 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.139271 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.161568 4631 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.169679 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.169738 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-template-login\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.169773 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.169818 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-service-ca\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.169854 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.169905 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.169940 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.169975 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-router-certs\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.170008 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-session\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.170047 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzqwh\" (UniqueName: \"kubernetes.io/projected/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-kube-api-access-wzqwh\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.170081 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-audit-dir\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.170112 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.170150 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-audit-policies\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx"
Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.170205 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName:
\"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-template-error\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.171446 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.171465 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-service-ca\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.172175 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-audit-policies\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.172246 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-audit-dir\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.177397 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-template-error\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.178146 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-cliconfig\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.178834 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-router-certs\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.179154 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-template-provider-selection\") pod 
\"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.179536 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.179748 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-user-template-login\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.179986 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.182306 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-serving-cert\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.185600 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-v4-0-config-system-session\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.187584 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.196800 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzqwh\" (UniqueName: \"kubernetes.io/projected/12d84d2d-71c2-4d50-81b9-fa1f451b3fdf-kube-api-access-wzqwh\") pod \"oauth-openshift-76f84477b-6nvlx\" (UID: \"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf\") " pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.305028 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.332510 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.338762 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.388283 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.466761 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.470534 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.550943 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.554036 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.564462 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.638734 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.667948 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.773551 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.802175 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-76f84477b-6nvlx"] Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.815438 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.863095 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 29 04:15:54 crc kubenswrapper[4631]: I1129 04:15:54.902947 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.010694 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.047959 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.155450 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" event={"ID":"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf","Type":"ContainerStarted","Data":"77aa15d1b85679cb52c058b20f7147561c643b37811f1b17039b0d3244df9406"} Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.155493 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" 
event={"ID":"12d84d2d-71c2-4d50-81b9-fa1f451b3fdf","Type":"ContainerStarted","Data":"5b21b5f4fa7323c8347d65c2bb2e5932f6f7747f1fab6c5dcba6dac8a35eb817"} Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.175943 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.195899 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.208708 4631 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.222694 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57bcc661-05dc-4e34-9b78-ecc0d6f5f881" path="/var/lib/kubelet/pods/57bcc661-05dc-4e34-9b78-ecc0d6f5f881/volumes" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.223543 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ed87add-9fae-43f8-acf1-e8b425d9afee" path="/var/lib/kubelet/pods/9ed87add-9fae-43f8-acf1-e8b425d9afee/volumes" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.237790 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.272748 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.389178 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.415668 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.467960 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.522419 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.534953 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.593564 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.612611 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.691677 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.712043 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.727984 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.737957 4631 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.776913 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.857830 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.871725 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.913531 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 29 04:15:55 crc kubenswrapper[4631]: I1129 04:15:55.937244 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.009205 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.028309 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.160559 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.166515 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.184964 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.194367 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" podStartSLOduration=66.194348825 podStartE2EDuration="1m6.194348825s" podCreationTimestamp="2025-11-29 04:14:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:15:55.183968769 +0000 UTC m=+282.248472303" watchObservedRunningTime="2025-11-29 04:15:56.194348825 +0000 UTC m=+283.258852349" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.304197 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.386101 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.522370 4631 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.522574 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7" gracePeriod=5 Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.533693 4631 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.616428 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.699358 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.760089 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.805535 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.809631 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.887556 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.957030 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.959603 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 29 04:15:56 crc kubenswrapper[4631]: I1129 04:15:56.968655 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.019481 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.021946 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.082580 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.194662 4631 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.200476 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.224511 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.235710 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.341414 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.532429 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.602415 4631 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.672751 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.727793 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 29 04:15:57 crc kubenswrapper[4631]: I1129 04:15:57.959633 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.028555 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.035707 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.363265 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.430420 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.431027 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.597965 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.838441 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.852662 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.886908 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.887295 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 29 04:15:58 crc kubenswrapper[4631]: I1129 04:15:58.944284 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 29 04:15:59 crc kubenswrapper[4631]: I1129 04:15:59.036273 4631 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 29 04:15:59 crc kubenswrapper[4631]: I1129 04:15:59.117097 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 29 04:15:59 crc kubenswrapper[4631]: I1129 04:15:59.274380 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 29 04:15:59 crc kubenswrapper[4631]: I1129 04:15:59.473508 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 29 04:15:59 crc kubenswrapper[4631]: 
I1129 04:15:59.492841 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 29 04:15:59 crc kubenswrapper[4631]: I1129 04:15:59.641971 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 29 04:15:59 crc kubenswrapper[4631]: I1129 04:15:59.680535 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 29 04:15:59 crc kubenswrapper[4631]: I1129 04:15:59.763102 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 29 04:16:00 crc kubenswrapper[4631]: I1129 04:16:00.138932 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 29 04:16:01 crc kubenswrapper[4631]: I1129 04:16:01.206560 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.125504 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.125612 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.195266 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.195372 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.195417 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.195519 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.195562 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.195879 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.195992 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.196035 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.196197 4631 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.196219 4631 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.196236 4631 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.196280 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.208944 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.212834 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.212945 4631 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7" exitCode=137 Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.213006 4631 scope.go:117] "RemoveContainer" containerID="cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.213032 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.284217 4631 scope.go:117] "RemoveContainer" containerID="cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7" Nov 29 04:16:02 crc kubenswrapper[4631]: E1129 04:16:02.285018 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7\": container with ID starting with cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7 not found: ID does not exist" containerID="cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.285056 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7"} err="failed to get container status \"cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7\": rpc error: code = NotFound desc = could not find container \"cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7\": container with ID starting with cb40900d1b41e680808749b843da9e8b496957b4617499bd24da43ba45ad4de7 not found: ID does not exist" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.298151 4631 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:02 crc kubenswrapper[4631]: I1129 04:16:02.298278 4631 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:03 crc kubenswrapper[4631]: I1129 04:16:03.228043 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 29 04:16:03 crc kubenswrapper[4631]: I1129 04:16:03.228540 4631 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 29 04:16:03 crc kubenswrapper[4631]: I1129 04:16:03.244321 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 29 04:16:03 crc kubenswrapper[4631]: I1129 04:16:03.244842 4631 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="de6eb66f-cfb2-4ab4-965d-4ff45ed9bc96" Nov 29 04:16:03 crc kubenswrapper[4631]: I1129 04:16:03.253265 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 29 04:16:03 crc kubenswrapper[4631]: I1129 04:16:03.253483 4631 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="de6eb66f-cfb2-4ab4-965d-4ff45ed9bc96" Nov 29 04:16:21 crc kubenswrapper[4631]: I1129 04:16:21.327228 4631 generic.go:334] "Generic (PLEG): container finished" podID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerID="0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928" exitCode=0 Nov 29 04:16:21 crc kubenswrapper[4631]: I1129 04:16:21.327438 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" event={"ID":"dba614c7-ceae-4ce5-afb6-6d082156f640","Type":"ContainerDied","Data":"0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928"} Nov 29 04:16:21 crc kubenswrapper[4631]: I1129 04:16:21.328207 4631 scope.go:117] "RemoveContainer" containerID="0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928" Nov 29 04:16:21 crc kubenswrapper[4631]: I1129 04:16:21.897583 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:16:21 crc kubenswrapper[4631]: I1129 04:16:21.898096 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:16:22 crc kubenswrapper[4631]: I1129 04:16:22.338884 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" event={"ID":"dba614c7-ceae-4ce5-afb6-6d082156f640","Type":"ContainerStarted","Data":"8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8"} Nov 29 04:16:22 crc kubenswrapper[4631]: I1129 04:16:22.339239 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:16:22 crc kubenswrapper[4631]: I1129 04:16:22.345372 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.301213 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sgtgs"] Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.302097 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" podUID="2099834e-bb35-49e5-b94b-06cf1d172cb2" containerName="controller-manager" containerID="cri-o://822206c9e32161dc032c34a19f96c9e00865401ff63d0b9c88415503122c10fa" gracePeriod=30 Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.389281 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8"] Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.389577 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" podUID="7f223354-db6f-4227-9e64-39c01f942b11" containerName="route-controller-manager" containerID="cri-o://8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff" gracePeriod=30 Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.508748 4631 generic.go:334] "Generic (PLEG): container finished" podID="2099834e-bb35-49e5-b94b-06cf1d172cb2" containerID="822206c9e32161dc032c34a19f96c9e00865401ff63d0b9c88415503122c10fa" exitCode=0 Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.509099 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" event={"ID":"2099834e-bb35-49e5-b94b-06cf1d172cb2","Type":"ContainerDied","Data":"822206c9e32161dc032c34a19f96c9e00865401ff63d0b9c88415503122c10fa"} Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.679693 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.777198 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.781360 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-client-ca\") pod \"2099834e-bb35-49e5-b94b-06cf1d172cb2\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.781407 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-config\") pod \"2099834e-bb35-49e5-b94b-06cf1d172cb2\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.781425 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-proxy-ca-bundles\") pod \"2099834e-bb35-49e5-b94b-06cf1d172cb2\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.781449 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2099834e-bb35-49e5-b94b-06cf1d172cb2-serving-cert\") pod \"2099834e-bb35-49e5-b94b-06cf1d172cb2\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.781543 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8p9pp\" (UniqueName: \"kubernetes.io/projected/2099834e-bb35-49e5-b94b-06cf1d172cb2-kube-api-access-8p9pp\") pod \"2099834e-bb35-49e5-b94b-06cf1d172cb2\" (UID: \"2099834e-bb35-49e5-b94b-06cf1d172cb2\") " Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.782565 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-client-ca" (OuterVolumeSpecName: "client-ca") pod "2099834e-bb35-49e5-b94b-06cf1d172cb2" (UID: "2099834e-bb35-49e5-b94b-06cf1d172cb2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.782633 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "2099834e-bb35-49e5-b94b-06cf1d172cb2" (UID: "2099834e-bb35-49e5-b94b-06cf1d172cb2"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.783150 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-config" (OuterVolumeSpecName: "config") pod "2099834e-bb35-49e5-b94b-06cf1d172cb2" (UID: "2099834e-bb35-49e5-b94b-06cf1d172cb2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.786631 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2099834e-bb35-49e5-b94b-06cf1d172cb2-kube-api-access-8p9pp" (OuterVolumeSpecName: "kube-api-access-8p9pp") pod "2099834e-bb35-49e5-b94b-06cf1d172cb2" (UID: "2099834e-bb35-49e5-b94b-06cf1d172cb2"). InnerVolumeSpecName "kube-api-access-8p9pp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.789701 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2099834e-bb35-49e5-b94b-06cf1d172cb2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2099834e-bb35-49e5-b94b-06cf1d172cb2" (UID: "2099834e-bb35-49e5-b94b-06cf1d172cb2"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.882546 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-client-ca\") pod \"7f223354-db6f-4227-9e64-39c01f942b11\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.882610 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-config\") pod \"7f223354-db6f-4227-9e64-39c01f942b11\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.882692 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f223354-db6f-4227-9e64-39c01f942b11-serving-cert\") pod \"7f223354-db6f-4227-9e64-39c01f942b11\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.882735 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvxxw\" (UniqueName: \"kubernetes.io/projected/7f223354-db6f-4227-9e64-39c01f942b11-kube-api-access-mvxxw\") pod \"7f223354-db6f-4227-9e64-39c01f942b11\" (UID: \"7f223354-db6f-4227-9e64-39c01f942b11\") " Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.883258 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-client-ca" (OuterVolumeSpecName: "client-ca") pod "7f223354-db6f-4227-9e64-39c01f942b11" (UID: "7f223354-db6f-4227-9e64-39c01f942b11"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.883312 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-config" (OuterVolumeSpecName: "config") pod "7f223354-db6f-4227-9e64-39c01f942b11" (UID: "7f223354-db6f-4227-9e64-39c01f942b11"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.883764 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2099834e-bb35-49e5-b94b-06cf1d172cb2-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.883790 4631 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.883803 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f223354-db6f-4227-9e64-39c01f942b11-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.883817 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8p9pp\" (UniqueName: \"kubernetes.io/projected/2099834e-bb35-49e5-b94b-06cf1d172cb2-kube-api-access-8p9pp\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.883831 4631 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-client-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.883842 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:45 crc kubenswrapper[4631]: I1129 04:16:45.883853 4631 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2099834e-bb35-49e5-b94b-06cf1d172cb2-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.551702 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f223354-db6f-4227-9e64-39c01f942b11-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7f223354-db6f-4227-9e64-39c01f942b11" (UID: "7f223354-db6f-4227-9e64-39c01f942b11"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.551800 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f223354-db6f-4227-9e64-39c01f942b11-kube-api-access-mvxxw" (OuterVolumeSpecName: "kube-api-access-mvxxw") pod "7f223354-db6f-4227-9e64-39c01f942b11" (UID: "7f223354-db6f-4227-9e64-39c01f942b11"). InnerVolumeSpecName "kube-api-access-mvxxw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.551970 4631 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f223354-db6f-4227-9e64-39c01f942b11-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.552002 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvxxw\" (UniqueName: \"kubernetes.io/projected/7f223354-db6f-4227-9e64-39c01f942b11-kube-api-access-mvxxw\") on node \"crc\" DevicePath \"\"" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.560582 4631 generic.go:334] "Generic (PLEG): container finished" podID="7f223354-db6f-4227-9e64-39c01f942b11" containerID="8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff" exitCode=0 Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.560681 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" event={"ID":"7f223354-db6f-4227-9e64-39c01f942b11","Type":"ContainerDied","Data":"8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff"} Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.560722 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" event={"ID":"7f223354-db6f-4227-9e64-39c01f942b11","Type":"ContainerDied","Data":"298a19d5de35ace548b2298314e4402300dc175607a950369c24282f53d9cfce"} Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.560749 4631 scope.go:117] "RemoveContainer" containerID="8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.560891 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.564452 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" event={"ID":"2099834e-bb35-49e5-b94b-06cf1d172cb2","Type":"ContainerDied","Data":"27a2a226305dc9915f556bdf9f0f3e8f57175d25304f1a94b6b42565e1f6f08b"} Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.564528 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-sgtgs" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.580668 4631 scope.go:117] "RemoveContainer" containerID="8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff" Nov 29 04:16:46 crc kubenswrapper[4631]: E1129 04:16:46.581601 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff\": container with ID starting with 8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff not found: ID does not exist" containerID="8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.581633 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff"} err="failed to get container status \"8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff\": rpc error: code = NotFound desc = could not find container \"8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff\": container with ID starting with 8b7287105de708f8eb03683850e4fcbf2606528dd91447c480b3941535bc57ff not found: ID does not exist" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.581656 4631 scope.go:117] "RemoveContainer" containerID="822206c9e32161dc032c34a19f96c9e00865401ff63d0b9c88415503122c10fa" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.606556 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8"] Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.610958 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ct7j8"] Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.615852 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sgtgs"] Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.625136 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-sgtgs"] Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.700138 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5b89d5b47d-576mw"] Nov 29 04:16:46 crc kubenswrapper[4631]: E1129 04:16:46.700393 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f223354-db6f-4227-9e64-39c01f942b11" containerName="route-controller-manager" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.700407 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f223354-db6f-4227-9e64-39c01f942b11" containerName="route-controller-manager" Nov 29 04:16:46 crc kubenswrapper[4631]: E1129 04:16:46.700422 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.700428 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 29 04:16:46 crc kubenswrapper[4631]: E1129 04:16:46.700437 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2099834e-bb35-49e5-b94b-06cf1d172cb2" containerName="controller-manager" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.700444 
4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="2099834e-bb35-49e5-b94b-06cf1d172cb2" containerName="controller-manager" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.700536 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="2099834e-bb35-49e5-b94b-06cf1d172cb2" containerName="controller-manager" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.700544 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f223354-db6f-4227-9e64-39c01f942b11" containerName="route-controller-manager" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.700552 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.700915 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.702823 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.706849 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.707275 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.707530 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.707759 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.708744 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.717642 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b89d5b47d-576mw"] Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.722948 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.855310 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/089f27e8-7205-48ee-9540-d7b6aab08bcb-client-ca\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.855381 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/089f27e8-7205-48ee-9540-d7b6aab08bcb-config\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.855415 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/089f27e8-7205-48ee-9540-d7b6aab08bcb-serving-cert\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.855502 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/089f27e8-7205-48ee-9540-d7b6aab08bcb-proxy-ca-bundles\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.855551 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzpzs\" (UniqueName: \"kubernetes.io/projected/089f27e8-7205-48ee-9540-d7b6aab08bcb-kube-api-access-hzpzs\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.956407 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/089f27e8-7205-48ee-9540-d7b6aab08bcb-client-ca\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.956468 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/089f27e8-7205-48ee-9540-d7b6aab08bcb-config\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.956516 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/089f27e8-7205-48ee-9540-d7b6aab08bcb-serving-cert\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.956623 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/089f27e8-7205-48ee-9540-d7b6aab08bcb-proxy-ca-bundles\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.956684 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzpzs\" (UniqueName: \"kubernetes.io/projected/089f27e8-7205-48ee-9540-d7b6aab08bcb-kube-api-access-hzpzs\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.957278 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/089f27e8-7205-48ee-9540-d7b6aab08bcb-client-ca\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: 
\"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.958551 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/089f27e8-7205-48ee-9540-d7b6aab08bcb-config\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.958844 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/089f27e8-7205-48ee-9540-d7b6aab08bcb-proxy-ca-bundles\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.966436 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/089f27e8-7205-48ee-9540-d7b6aab08bcb-serving-cert\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:46 crc kubenswrapper[4631]: I1129 04:16:46.985127 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzpzs\" (UniqueName: \"kubernetes.io/projected/089f27e8-7205-48ee-9540-d7b6aab08bcb-kube-api-access-hzpzs\") pod \"controller-manager-5b89d5b47d-576mw\" (UID: \"089f27e8-7205-48ee-9540-d7b6aab08bcb\") " pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.021024 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.083741 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b"] Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.084482 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.087476 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.087891 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.088925 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.089004 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.094102 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b"] Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.094526 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.096388 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.159618 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/894f09e7-f5ee-488f-b021-1a3052cb842e-serving-cert\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.159715 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/894f09e7-f5ee-488f-b021-1a3052cb842e-config\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.159764 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wskhg\" (UniqueName: \"kubernetes.io/projected/894f09e7-f5ee-488f-b021-1a3052cb842e-kube-api-access-wskhg\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.159795 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/894f09e7-f5ee-488f-b021-1a3052cb842e-client-ca\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.227832 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2099834e-bb35-49e5-b94b-06cf1d172cb2" path="/var/lib/kubelet/pods/2099834e-bb35-49e5-b94b-06cf1d172cb2/volumes" Nov 29 04:16:47 crc kubenswrapper[4631]: 
I1129 04:16:47.228439 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f223354-db6f-4227-9e64-39c01f942b11" path="/var/lib/kubelet/pods/7f223354-db6f-4227-9e64-39c01f942b11/volumes" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.262380 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/894f09e7-f5ee-488f-b021-1a3052cb842e-config\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.262431 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wskhg\" (UniqueName: \"kubernetes.io/projected/894f09e7-f5ee-488f-b021-1a3052cb842e-kube-api-access-wskhg\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.262450 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/894f09e7-f5ee-488f-b021-1a3052cb842e-client-ca\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.263821 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/894f09e7-f5ee-488f-b021-1a3052cb842e-client-ca\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.263927 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/894f09e7-f5ee-488f-b021-1a3052cb842e-config\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.264192 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/894f09e7-f5ee-488f-b021-1a3052cb842e-serving-cert\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.265653 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b89d5b47d-576mw"] Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.269396 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/894f09e7-f5ee-488f-b021-1a3052cb842e-serving-cert\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.277083 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wskhg\" 
(UniqueName: \"kubernetes.io/projected/894f09e7-f5ee-488f-b021-1a3052cb842e-kube-api-access-wskhg\") pod \"route-controller-manager-8bcc8b8db-hr74b\" (UID: \"894f09e7-f5ee-488f-b021-1a3052cb842e\") " pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.436157 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.578902 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" event={"ID":"089f27e8-7205-48ee-9540-d7b6aab08bcb","Type":"ContainerStarted","Data":"b0835d3c503631a230899cec07e22d0eb87732af3c666fbda3c33b5b1b143906"} Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.578955 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" event={"ID":"089f27e8-7205-48ee-9540-d7b6aab08bcb","Type":"ContainerStarted","Data":"21dab268b3e41bad2d09b2748f249c985cdcdbcd2326624f1a754fc96b0fb163"} Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.579351 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.587465 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.602870 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5b89d5b47d-576mw" podStartSLOduration=1.602834488 podStartE2EDuration="1.602834488s" podCreationTimestamp="2025-11-29 04:16:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:16:47.598701496 +0000 UTC m=+334.663205020" watchObservedRunningTime="2025-11-29 04:16:47.602834488 +0000 UTC m=+334.667338002" Nov 29 04:16:47 crc kubenswrapper[4631]: I1129 04:16:47.713920 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b"] Nov 29 04:16:48 crc kubenswrapper[4631]: I1129 04:16:48.588609 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" event={"ID":"894f09e7-f5ee-488f-b021-1a3052cb842e","Type":"ContainerStarted","Data":"aff50fac65000a6abfcbfb13c1a91526d6eb043b030394fb68eaccefe301f1a1"} Nov 29 04:16:48 crc kubenswrapper[4631]: I1129 04:16:48.588956 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" event={"ID":"894f09e7-f5ee-488f-b021-1a3052cb842e","Type":"ContainerStarted","Data":"964efcc3c0f62866d6a5a03e3d346ce9ef36c3e5b3e91345c197fc4b2e7d0b4b"} Nov 29 04:16:48 crc kubenswrapper[4631]: I1129 04:16:48.613754 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" podStartSLOduration=3.613731388 podStartE2EDuration="3.613731388s" podCreationTimestamp="2025-11-29 04:16:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-29 04:16:48.607901643 +0000 UTC m=+335.672405187" watchObservedRunningTime="2025-11-29 04:16:48.613731388 +0000 UTC m=+335.678234932" Nov 29 04:16:49 crc kubenswrapper[4631]: I1129 04:16:49.594678 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:16:49 crc kubenswrapper[4631]: I1129 04:16:49.603435 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-8bcc8b8db-hr74b" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.630468 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-q99pb"] Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.631773 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.692085 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-q99pb"] Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.731683 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b4d272e9-b579-4bd5-9f3e-ea1afad04611-bound-sa-token\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.731726 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrqzf\" (UniqueName: \"kubernetes.io/projected/b4d272e9-b579-4bd5-9f3e-ea1afad04611-kube-api-access-qrqzf\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.731747 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b4d272e9-b579-4bd5-9f3e-ea1afad04611-ca-trust-extracted\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.731919 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b4d272e9-b579-4bd5-9f3e-ea1afad04611-trusted-ca\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.731970 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b4d272e9-b579-4bd5-9f3e-ea1afad04611-registry-tls\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.732098 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/b4d272e9-b579-4bd5-9f3e-ea1afad04611-installation-pull-secrets\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.732208 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b4d272e9-b579-4bd5-9f3e-ea1afad04611-registry-certificates\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.732240 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.763820 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.833587 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrqzf\" (UniqueName: \"kubernetes.io/projected/b4d272e9-b579-4bd5-9f3e-ea1afad04611-kube-api-access-qrqzf\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.833647 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b4d272e9-b579-4bd5-9f3e-ea1afad04611-ca-trust-extracted\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.833715 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b4d272e9-b579-4bd5-9f3e-ea1afad04611-trusted-ca\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.833736 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b4d272e9-b579-4bd5-9f3e-ea1afad04611-registry-tls\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.833772 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b4d272e9-b579-4bd5-9f3e-ea1afad04611-installation-pull-secrets\") pod \"image-registry-66df7c8f76-q99pb\" (UID: 
\"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.833802 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b4d272e9-b579-4bd5-9f3e-ea1afad04611-registry-certificates\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.833830 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b4d272e9-b579-4bd5-9f3e-ea1afad04611-bound-sa-token\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.834606 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b4d272e9-b579-4bd5-9f3e-ea1afad04611-ca-trust-extracted\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.835445 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b4d272e9-b579-4bd5-9f3e-ea1afad04611-trusted-ca\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.835464 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b4d272e9-b579-4bd5-9f3e-ea1afad04611-registry-certificates\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.853672 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b4d272e9-b579-4bd5-9f3e-ea1afad04611-registry-tls\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.853998 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrqzf\" (UniqueName: \"kubernetes.io/projected/b4d272e9-b579-4bd5-9f3e-ea1afad04611-kube-api-access-qrqzf\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.854757 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b4d272e9-b579-4bd5-9f3e-ea1afad04611-installation-pull-secrets\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.855295 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" 
(UniqueName: \"kubernetes.io/projected/b4d272e9-b579-4bd5-9f3e-ea1afad04611-bound-sa-token\") pod \"image-registry-66df7c8f76-q99pb\" (UID: \"b4d272e9-b579-4bd5-9f3e-ea1afad04611\") " pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:15 crc kubenswrapper[4631]: I1129 04:17:15.949206 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:16 crc kubenswrapper[4631]: I1129 04:17:16.435433 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-q99pb"] Nov 29 04:17:16 crc kubenswrapper[4631]: I1129 04:17:16.776574 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" event={"ID":"b4d272e9-b579-4bd5-9f3e-ea1afad04611","Type":"ContainerStarted","Data":"80bf3b357c6a3d3872c3954ea26ca4ab8b50026aa6a05deb80def3e7e8b8bd68"} Nov 29 04:17:16 crc kubenswrapper[4631]: I1129 04:17:16.776632 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" event={"ID":"b4d272e9-b579-4bd5-9f3e-ea1afad04611","Type":"ContainerStarted","Data":"b832a3dcd45a6e5ebbca80b9a9fab9ed210b83b62c87a910c4f89cdf5f32f740"} Nov 29 04:17:16 crc kubenswrapper[4631]: I1129 04:17:16.777837 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:20 crc kubenswrapper[4631]: I1129 04:17:20.716320 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:17:20 crc kubenswrapper[4631]: I1129 04:17:20.716769 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:17:26 crc kubenswrapper[4631]: I1129 04:17:26.582484 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" podStartSLOduration=11.582468305 podStartE2EDuration="11.582468305s" podCreationTimestamp="2025-11-29 04:17:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:17:16.808768636 +0000 UTC m=+363.873272190" watchObservedRunningTime="2025-11-29 04:17:26.582468305 +0000 UTC m=+373.646971819" Nov 29 04:17:26 crc kubenswrapper[4631]: I1129 04:17:26.585923 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xt8sx"] Nov 29 04:17:26 crc kubenswrapper[4631]: I1129 04:17:26.586112 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xt8sx" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerName="registry-server" containerID="cri-o://4c572e702fa791de6877570d94ee7631cb56d6efebd7847fed65434ae98b98f6" gracePeriod=2 Nov 29 04:17:26 crc kubenswrapper[4631]: I1129 04:17:26.837759 4631 generic.go:334] "Generic (PLEG): container finished" podID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" 
containerID="4c572e702fa791de6877570d94ee7631cb56d6efebd7847fed65434ae98b98f6" exitCode=0 Nov 29 04:17:26 crc kubenswrapper[4631]: I1129 04:17:26.838138 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xt8sx" event={"ID":"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e","Type":"ContainerDied","Data":"4c572e702fa791de6877570d94ee7631cb56d6efebd7847fed65434ae98b98f6"} Nov 29 04:17:26 crc kubenswrapper[4631]: I1129 04:17:26.992952 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.118446 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-utilities\") pod \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.118550 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-catalog-content\") pod \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.118581 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jghbc\" (UniqueName: \"kubernetes.io/projected/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-kube-api-access-jghbc\") pod \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\" (UID: \"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e\") " Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.119414 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-utilities" (OuterVolumeSpecName: "utilities") pod "ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" (UID: "ba2e9065-18a4-4fc3-b8f5-a69a2b04286e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.132113 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-kube-api-access-jghbc" (OuterVolumeSpecName: "kube-api-access-jghbc") pod "ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" (UID: "ba2e9065-18a4-4fc3-b8f5-a69a2b04286e"). InnerVolumeSpecName "kube-api-access-jghbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.134192 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" (UID: "ba2e9065-18a4-4fc3-b8f5-a69a2b04286e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.220151 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.220179 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jghbc\" (UniqueName: \"kubernetes.io/projected/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-kube-api-access-jghbc\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.220206 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.847677 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xt8sx" event={"ID":"ba2e9065-18a4-4fc3-b8f5-a69a2b04286e","Type":"ContainerDied","Data":"f7641711d7577c210a1b4cb8c94b301993db65701d70eea5fdbbd945ec70754d"} Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.847951 4631 scope.go:117] "RemoveContainer" containerID="4c572e702fa791de6877570d94ee7631cb56d6efebd7847fed65434ae98b98f6" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.847762 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xt8sx" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.878585 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xt8sx"] Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.882998 4631 scope.go:117] "RemoveContainer" containerID="12c56536a8fe481ae17332bebb1c3667e2f5c079bca3bbf376e08351fbc45f4c" Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.887054 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xt8sx"] Nov 29 04:17:27 crc kubenswrapper[4631]: I1129 04:17:27.905217 4631 scope.go:117] "RemoveContainer" containerID="c845bc47360c7bcd4b034d121fdc853718c06081905893f6f6c654c63acd2b8c" Nov 29 04:17:29 crc kubenswrapper[4631]: I1129 04:17:29.224243 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" path="/var/lib/kubelet/pods/ba2e9065-18a4-4fc3-b8f5-a69a2b04286e/volumes" Nov 29 04:17:35 crc kubenswrapper[4631]: I1129 04:17:35.964403 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-q99pb" Nov 29 04:17:36 crc kubenswrapper[4631]: I1129 04:17:36.051884 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8h4ns"] Nov 29 04:17:50 crc kubenswrapper[4631]: I1129 04:17:50.716642 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:17:50 crc kubenswrapper[4631]: I1129 04:17:50.717136 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.077187 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dnjrt"] Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.077890 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dnjrt" podUID="b7485422-4238-4138-9b71-866a1315b330" containerName="registry-server" containerID="cri-o://5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656" gracePeriod=30 Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.101471 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hplsb"] Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.101711 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hplsb" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" containerName="registry-server" containerID="cri-o://bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc" gracePeriod=30 Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.112195 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kwszg"] Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.112419 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator" containerID="cri-o://8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8" gracePeriod=30 Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.117689 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9ffd"] Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.118083 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-x9ffd" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerName="registry-server" containerID="cri-o://57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027" gracePeriod=30 Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.131549 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94k48"] Nov 29 04:17:52 crc kubenswrapper[4631]: E1129 04:17:52.132088 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerName="registry-server" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.132195 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerName="registry-server" Nov 29 04:17:52 crc kubenswrapper[4631]: E1129 04:17:52.132301 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerName="extract-utilities" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.132417 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerName="extract-utilities" Nov 29 04:17:52 crc kubenswrapper[4631]: E1129 04:17:52.132517 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerName="extract-content" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.132596 
4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerName="extract-content" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.132802 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba2e9065-18a4-4fc3-b8f5-a69a2b04286e" containerName="registry-server" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.133428 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.138099 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dq9ls"] Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.138530 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dq9ls" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerName="registry-server" containerID="cri-o://6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e" gracePeriod=30 Nov 29 04:17:52 crc kubenswrapper[4631]: E1129 04:17:52.147326 4631 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027" cmd=["grpc_health_probe","-addr=:50051"] Nov 29 04:17:52 crc kubenswrapper[4631]: E1129 04:17:52.151311 4631 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027" cmd=["grpc_health_probe","-addr=:50051"] Nov 29 04:17:52 crc kubenswrapper[4631]: E1129 04:17:52.153535 4631 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027" cmd=["grpc_health_probe","-addr=:50051"] Nov 29 04:17:52 crc kubenswrapper[4631]: E1129 04:17:52.153576 4631 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-x9ffd" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerName="registry-server" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.178055 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94k48"] Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.202258 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/15665c92-a9d7-4bf9-807a-9f80ce56d8ac-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-94k48\" (UID: \"15665c92-a9d7-4bf9-807a-9f80ce56d8ac\") " pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.202313 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brd6f\" (UniqueName: \"kubernetes.io/projected/15665c92-a9d7-4bf9-807a-9f80ce56d8ac-kube-api-access-brd6f\") pod 
\"marketplace-operator-79b997595-94k48\" (UID: \"15665c92-a9d7-4bf9-807a-9f80ce56d8ac\") " pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.202445 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/15665c92-a9d7-4bf9-807a-9f80ce56d8ac-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-94k48\" (UID: \"15665c92-a9d7-4bf9-807a-9f80ce56d8ac\") " pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.303798 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/15665c92-a9d7-4bf9-807a-9f80ce56d8ac-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-94k48\" (UID: \"15665c92-a9d7-4bf9-807a-9f80ce56d8ac\") " pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.304116 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brd6f\" (UniqueName: \"kubernetes.io/projected/15665c92-a9d7-4bf9-807a-9f80ce56d8ac-kube-api-access-brd6f\") pod \"marketplace-operator-79b997595-94k48\" (UID: \"15665c92-a9d7-4bf9-807a-9f80ce56d8ac\") " pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.304179 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/15665c92-a9d7-4bf9-807a-9f80ce56d8ac-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-94k48\" (UID: \"15665c92-a9d7-4bf9-807a-9f80ce56d8ac\") " pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.305867 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/15665c92-a9d7-4bf9-807a-9f80ce56d8ac-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-94k48\" (UID: \"15665c92-a9d7-4bf9-807a-9f80ce56d8ac\") " pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.309685 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/15665c92-a9d7-4bf9-807a-9f80ce56d8ac-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-94k48\" (UID: \"15665c92-a9d7-4bf9-807a-9f80ce56d8ac\") " pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.320391 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brd6f\" (UniqueName: \"kubernetes.io/projected/15665c92-a9d7-4bf9-807a-9f80ce56d8ac-kube-api-access-brd6f\") pod \"marketplace-operator-79b997595-94k48\" (UID: \"15665c92-a9d7-4bf9-807a-9f80ce56d8ac\") " pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.449229 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.458014 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.533026 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.614852 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pl95w\" (UniqueName: \"kubernetes.io/projected/dba614c7-ceae-4ce5-afb6-6d082156f640-kube-api-access-pl95w\") pod \"dba614c7-ceae-4ce5-afb6-6d082156f640\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.614972 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-utilities\") pod \"b7485422-4238-4138-9b71-866a1315b330\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.615002 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-trusted-ca\") pod \"dba614c7-ceae-4ce5-afb6-6d082156f640\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.615036 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4qrc\" (UniqueName: \"kubernetes.io/projected/b7485422-4238-4138-9b71-866a1315b330-kube-api-access-l4qrc\") pod \"b7485422-4238-4138-9b71-866a1315b330\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.615054 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-catalog-content\") pod \"b7485422-4238-4138-9b71-866a1315b330\" (UID: \"b7485422-4238-4138-9b71-866a1315b330\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.615229 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-operator-metrics\") pod \"dba614c7-ceae-4ce5-afb6-6d082156f640\" (UID: \"dba614c7-ceae-4ce5-afb6-6d082156f640\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.616611 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "dba614c7-ceae-4ce5-afb6-6d082156f640" (UID: "dba614c7-ceae-4ce5-afb6-6d082156f640"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.618406 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "dba614c7-ceae-4ce5-afb6-6d082156f640" (UID: "dba614c7-ceae-4ce5-afb6-6d082156f640"). InnerVolumeSpecName "marketplace-operator-metrics". 
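
The machine-config-daemon liveness failures logged at 04:17:20 and 04:17:50 are plain HTTP GETs against 127.0.0.1:8798/health that hit a refused connection. A stand-alone sketch of an equivalent check (assumption: any successful response counts as healthy; this mirrors the prober output in this log, not kubelet source):

#!/usr/bin/env python3
"""Minimal HTTP liveness check shaped like the prober records above."""
import urllib.error
import urllib.request

def probe(url="http://127.0.0.1:8798/health", timeout=1.0):
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return 200 <= resp.status < 400
    except (urllib.error.URLError, OSError) as exc:
        # A down daemon yields exactly the failure seen here: connection refused.
        print(f'Probe failed: Get "{url}": {exc}')
        return False

if __name__ == "__main__":
    print("liveness:", "ok" if probe() else "failure")
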
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.620543 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7485422-4238-4138-9b71-866a1315b330-kube-api-access-l4qrc" (OuterVolumeSpecName: "kube-api-access-l4qrc") pod "b7485422-4238-4138-9b71-866a1315b330" (UID: "b7485422-4238-4138-9b71-866a1315b330"). InnerVolumeSpecName "kube-api-access-l4qrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.625298 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-utilities" (OuterVolumeSpecName: "utilities") pod "b7485422-4238-4138-9b71-866a1315b330" (UID: "b7485422-4238-4138-9b71-866a1315b330"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.649481 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dba614c7-ceae-4ce5-afb6-6d082156f640-kube-api-access-pl95w" (OuterVolumeSpecName: "kube-api-access-pl95w") pod "dba614c7-ceae-4ce5-afb6-6d082156f640" (UID: "dba614c7-ceae-4ce5-afb6-6d082156f640"). InnerVolumeSpecName "kube-api-access-pl95w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.665400 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.668006 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.716168 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.716208 4631 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.716222 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4qrc\" (UniqueName: \"kubernetes.io/projected/b7485422-4238-4138-9b71-866a1315b330-kube-api-access-l4qrc\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.716233 4631 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dba614c7-ceae-4ce5-afb6-6d082156f640-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.716246 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pl95w\" (UniqueName: \"kubernetes.io/projected/dba614c7-ceae-4ce5-afb6-6d082156f640-kube-api-access-pl95w\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.720137 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.757205 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b7485422-4238-4138-9b71-866a1315b330" (UID: "b7485422-4238-4138-9b71-866a1315b330"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.816796 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-utilities\") pod \"4441569d-edba-4636-b54b-fcfc59f1cd3f\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.816856 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-catalog-content\") pod \"4441569d-edba-4636-b54b-fcfc59f1cd3f\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.816891 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9rvm\" (UniqueName: \"kubernetes.io/projected/8dc666e0-e138-4b85-9ecc-d6af453cdc05-kube-api-access-g9rvm\") pod \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.816909 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-utilities\") pod \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.816927 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gbtp\" (UniqueName: \"kubernetes.io/projected/4441569d-edba-4636-b54b-fcfc59f1cd3f-kube-api-access-6gbtp\") pod \"4441569d-edba-4636-b54b-fcfc59f1cd3f\" (UID: \"4441569d-edba-4636-b54b-fcfc59f1cd3f\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.816958 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-utilities\") pod \"35e15c22-60f2-4df1-994c-368c65c4987a\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.817001 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvkkh\" (UniqueName: \"kubernetes.io/projected/35e15c22-60f2-4df1-994c-368c65c4987a-kube-api-access-hvkkh\") pod \"35e15c22-60f2-4df1-994c-368c65c4987a\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.817039 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-catalog-content\") pod \"35e15c22-60f2-4df1-994c-368c65c4987a\" (UID: \"35e15c22-60f2-4df1-994c-368c65c4987a\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.817054 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-catalog-content\") pod \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\" (UID: \"8dc666e0-e138-4b85-9ecc-d6af453cdc05\") " Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.817235 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7485422-4238-4138-9b71-866a1315b330-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.817435 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-utilities" (OuterVolumeSpecName: "utilities") pod "4441569d-edba-4636-b54b-fcfc59f1cd3f" (UID: "4441569d-edba-4636-b54b-fcfc59f1cd3f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.818061 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-utilities" (OuterVolumeSpecName: "utilities") pod "35e15c22-60f2-4df1-994c-368c65c4987a" (UID: "35e15c22-60f2-4df1-994c-368c65c4987a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.819110 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-utilities" (OuterVolumeSpecName: "utilities") pod "8dc666e0-e138-4b85-9ecc-d6af453cdc05" (UID: "8dc666e0-e138-4b85-9ecc-d6af453cdc05"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.819885 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dc666e0-e138-4b85-9ecc-d6af453cdc05-kube-api-access-g9rvm" (OuterVolumeSpecName: "kube-api-access-g9rvm") pod "8dc666e0-e138-4b85-9ecc-d6af453cdc05" (UID: "8dc666e0-e138-4b85-9ecc-d6af453cdc05"). InnerVolumeSpecName "kube-api-access-g9rvm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.820200 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35e15c22-60f2-4df1-994c-368c65c4987a-kube-api-access-hvkkh" (OuterVolumeSpecName: "kube-api-access-hvkkh") pod "35e15c22-60f2-4df1-994c-368c65c4987a" (UID: "35e15c22-60f2-4df1-994c-368c65c4987a"). InnerVolumeSpecName "kube-api-access-hvkkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.821700 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4441569d-edba-4636-b54b-fcfc59f1cd3f-kube-api-access-6gbtp" (OuterVolumeSpecName: "kube-api-access-6gbtp") pod "4441569d-edba-4636-b54b-fcfc59f1cd3f" (UID: "4441569d-edba-4636-b54b-fcfc59f1cd3f"). InnerVolumeSpecName "kube-api-access-6gbtp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.840271 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4441569d-edba-4636-b54b-fcfc59f1cd3f" (UID: "4441569d-edba-4636-b54b-fcfc59f1cd3f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.891518 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94k48"] Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.898358 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "35e15c22-60f2-4df1-994c-368c65c4987a" (UID: "35e15c22-60f2-4df1-994c-368c65c4987a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:52 crc kubenswrapper[4631]: W1129 04:17:52.900352 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15665c92_a9d7_4bf9_807a_9f80ce56d8ac.slice/crio-6df3bf5aaa01deed7b7d07eb24bd102377a08e24f3248393a90bc4f9deb198a1 WatchSource:0}: Error finding container 6df3bf5aaa01deed7b7d07eb24bd102377a08e24f3248393a90bc4f9deb198a1: Status 404 returned error can't find the container with id 6df3bf5aaa01deed7b7d07eb24bd102377a08e24f3248393a90bc4f9deb198a1 Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.918116 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.918539 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.918607 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4441569d-edba-4636-b54b-fcfc59f1cd3f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.918682 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9rvm\" (UniqueName: \"kubernetes.io/projected/8dc666e0-e138-4b85-9ecc-d6af453cdc05-kube-api-access-g9rvm\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.918745 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.918808 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gbtp\" (UniqueName: \"kubernetes.io/projected/4441569d-edba-4636-b54b-fcfc59f1cd3f-kube-api-access-6gbtp\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.918869 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35e15c22-60f2-4df1-994c-368c65c4987a-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.918926 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvkkh\" (UniqueName: \"kubernetes.io/projected/35e15c22-60f2-4df1-994c-368c65c4987a-kube-api-access-hvkkh\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:52 crc kubenswrapper[4631]: I1129 04:17:52.931658 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8dc666e0-e138-4b85-9ecc-d6af453cdc05" (UID: "8dc666e0-e138-4b85-9ecc-d6af453cdc05"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.020577 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dc666e0-e138-4b85-9ecc-d6af453cdc05-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.047438 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-94k48" event={"ID":"15665c92-a9d7-4bf9-807a-9f80ce56d8ac","Type":"ContainerStarted","Data":"31aa274407dc682304345dfb52b7fd415fff78b9ed6b4785edd9301d3ea815e3"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.047496 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-94k48" event={"ID":"15665c92-a9d7-4bf9-807a-9f80ce56d8ac","Type":"ContainerStarted","Data":"6df3bf5aaa01deed7b7d07eb24bd102377a08e24f3248393a90bc4f9deb198a1"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.048149 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-94k48" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.049908 4631 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-94k48 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" start-of-body= Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.049966 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-94k48" podUID="15665c92-a9d7-4bf9-807a-9f80ce56d8ac" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.052470 4631 generic.go:334] "Generic (PLEG): container finished" podID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerID="8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8" exitCode=0 Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.052527 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" event={"ID":"dba614c7-ceae-4ce5-afb6-6d082156f640","Type":"ContainerDied","Data":"8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.052549 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" event={"ID":"dba614c7-ceae-4ce5-afb6-6d082156f640","Type":"ContainerDied","Data":"452c7232c4562035acea8c7c690fb02492e6e73fe54f38a746cc6b2136e24a85"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.052571 4631 scope.go:117] "RemoveContainer" containerID="8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.052692 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kwszg" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.057130 4631 generic.go:334] "Generic (PLEG): container finished" podID="35e15c22-60f2-4df1-994c-368c65c4987a" containerID="bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc" exitCode=0 Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.057186 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hplsb" event={"ID":"35e15c22-60f2-4df1-994c-368c65c4987a","Type":"ContainerDied","Data":"bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.057210 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hplsb" event={"ID":"35e15c22-60f2-4df1-994c-368c65c4987a","Type":"ContainerDied","Data":"014e6f90a26fd3778feed9758ea664655d77a073ea33c0e02013155259753763"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.057258 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hplsb" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.068062 4631 generic.go:334] "Generic (PLEG): container finished" podID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerID="57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027" exitCode=0 Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.068144 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9ffd" event={"ID":"4441569d-edba-4636-b54b-fcfc59f1cd3f","Type":"ContainerDied","Data":"57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.068177 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x9ffd" event={"ID":"4441569d-edba-4636-b54b-fcfc59f1cd3f","Type":"ContainerDied","Data":"72d3106bb1f1fb589e6859678734fef94f9958bd0f2180faa074c7a5ea60e0f9"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.068143 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x9ffd" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.095106 4631 generic.go:334] "Generic (PLEG): container finished" podID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerID="6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e" exitCode=0 Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.095262 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dq9ls" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.095812 4631 scope.go:117] "RemoveContainer" containerID="0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.095916 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dq9ls" event={"ID":"8dc666e0-e138-4b85-9ecc-d6af453cdc05","Type":"ContainerDied","Data":"6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.095964 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dq9ls" event={"ID":"8dc666e0-e138-4b85-9ecc-d6af453cdc05","Type":"ContainerDied","Data":"c916a61053fb8c6c83448e14f0f1d4464e518e75a59c1b075394b02cd9956178"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.096054 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-94k48" podStartSLOduration=1.096030102 podStartE2EDuration="1.096030102s" podCreationTimestamp="2025-11-29 04:17:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:17:53.069267117 +0000 UTC m=+400.133770651" watchObservedRunningTime="2025-11-29 04:17:53.096030102 +0000 UTC m=+400.160533636" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.100879 4631 generic.go:334] "Generic (PLEG): container finished" podID="b7485422-4238-4138-9b71-866a1315b330" containerID="5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656" exitCode=0 Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.100919 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnjrt" event={"ID":"b7485422-4238-4138-9b71-866a1315b330","Type":"ContainerDied","Data":"5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.100934 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dnjrt" event={"ID":"b7485422-4238-4138-9b71-866a1315b330","Type":"ContainerDied","Data":"e96e9b1640159d5f5030507a5d3493f3c7cbddc8ad041608cd262bb3d02260a7"} Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.101024 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dnjrt" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.103238 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kwszg"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.123772 4631 scope.go:117] "RemoveContainer" containerID="8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.125016 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8\": container with ID starting with 8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8 not found: ID does not exist" containerID="8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.125054 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8"} err="failed to get container status \"8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8\": rpc error: code = NotFound desc = could not find container \"8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8\": container with ID starting with 8294b18094dc6a62f323aacb5930fbae8b4bd916ac53cf705ff65d2b4ef767f8 not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.125077 4631 scope.go:117] "RemoveContainer" containerID="0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.125822 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928\": container with ID starting with 0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928 not found: ID does not exist" containerID="0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.125849 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928"} err="failed to get container status \"0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928\": rpc error: code = NotFound desc = could not find container \"0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928\": container with ID starting with 0fe17d4a5410c1e7df64911716f0a9a6385019e74ba6a38a0193bdf6c85b3928 not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.125871 4631 scope.go:117] "RemoveContainer" containerID="bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.126070 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kwszg"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.136560 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9ffd"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.140441 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-x9ffd"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.144436 4631 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/community-operators-hplsb"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.154217 4631 scope.go:117] "RemoveContainer" containerID="01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.161286 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hplsb"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.174694 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dnjrt"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.180845 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dnjrt"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.186237 4631 scope.go:117] "RemoveContainer" containerID="97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.187231 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dq9ls"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.191747 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dq9ls"] Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.207675 4631 scope.go:117] "RemoveContainer" containerID="bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.208088 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc\": container with ID starting with bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc not found: ID does not exist" containerID="bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.208123 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc"} err="failed to get container status \"bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc\": rpc error: code = NotFound desc = could not find container \"bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc\": container with ID starting with bcdce72ab3da0d565261bc982ccd9f50d8a9fe3fb7960d404acb821d3ef642dc not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.208148 4631 scope.go:117] "RemoveContainer" containerID="01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.208620 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22\": container with ID starting with 01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22 not found: ID does not exist" containerID="01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.208646 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22"} err="failed to get container status \"01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22\": rpc error: code = NotFound desc = could not find 
container \"01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22\": container with ID starting with 01d99451467a5c3e1e812890e7cd532d3611d986b57900a3d56da04c0751bf22 not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.208667 4631 scope.go:117] "RemoveContainer" containerID="97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.209146 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73\": container with ID starting with 97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73 not found: ID does not exist" containerID="97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.209184 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73"} err="failed to get container status \"97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73\": rpc error: code = NotFound desc = could not find container \"97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73\": container with ID starting with 97e988b13bdb8b974162b7fff38339a58ad7f7694956ff6193d5adc99d74fa73 not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.209205 4631 scope.go:117] "RemoveContainer" containerID="57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.223144 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" path="/var/lib/kubelet/pods/35e15c22-60f2-4df1-994c-368c65c4987a/volumes" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.223944 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" path="/var/lib/kubelet/pods/4441569d-edba-4636-b54b-fcfc59f1cd3f/volumes" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.224874 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" path="/var/lib/kubelet/pods/8dc666e0-e138-4b85-9ecc-d6af453cdc05/volumes" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.226069 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7485422-4238-4138-9b71-866a1315b330" path="/var/lib/kubelet/pods/b7485422-4238-4138-9b71-866a1315b330/volumes" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.226865 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" path="/var/lib/kubelet/pods/dba614c7-ceae-4ce5-afb6-6d082156f640/volumes" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.250472 4631 scope.go:117] "RemoveContainer" containerID="01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.267951 4631 scope.go:117] "RemoveContainer" containerID="d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.285826 4631 scope.go:117] "RemoveContainer" containerID="57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.286191 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027\": container with ID starting with 57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027 not found: ID does not exist" containerID="57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.286229 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027"} err="failed to get container status \"57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027\": rpc error: code = NotFound desc = could not find container \"57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027\": container with ID starting with 57d1abfc8c282e9b077627420f541e5246763bf9892587c976c2f694eb01c027 not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.286253 4631 scope.go:117] "RemoveContainer" containerID="01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.290534 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db\": container with ID starting with 01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db not found: ID does not exist" containerID="01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.290645 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db"} err="failed to get container status \"01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db\": rpc error: code = NotFound desc = could not find container \"01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db\": container with ID starting with 01024fdc79d7840dee6fbe0dcfb0be98717d5df4e259eae86cff2fb4cdac19db not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.290674 4631 scope.go:117] "RemoveContainer" containerID="d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.291579 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff\": container with ID starting with d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff not found: ID does not exist" containerID="d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.291600 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff"} err="failed to get container status \"d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff\": rpc error: code = NotFound desc = could not find container \"d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff\": container with ID starting with d6024b4b7d64f5251caa6114c84b5d3ee7d44e6f8a9853955b8b66d337e29bff not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.291615 4631 scope.go:117] "RemoveContainer" 
containerID="6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.302987 4631 scope.go:117] "RemoveContainer" containerID="70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.317501 4631 scope.go:117] "RemoveContainer" containerID="d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.330697 4631 scope.go:117] "RemoveContainer" containerID="6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.331362 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e\": container with ID starting with 6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e not found: ID does not exist" containerID="6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.331400 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e"} err="failed to get container status \"6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e\": rpc error: code = NotFound desc = could not find container \"6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e\": container with ID starting with 6e02d4b331f80ea73b690794babd14e2527959e4ebd960faf8ece71f5bfb632e not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.331425 4631 scope.go:117] "RemoveContainer" containerID="70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.331721 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c\": container with ID starting with 70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c not found: ID does not exist" containerID="70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.331755 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c"} err="failed to get container status \"70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c\": rpc error: code = NotFound desc = could not find container \"70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c\": container with ID starting with 70e12bbcc77b931d9a9e8deaaa79def9ea17377c526288d58e9fe0a8f2d8941c not found: ID does not exist" Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.331782 4631 scope.go:117] "RemoveContainer" containerID="d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13" Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.332176 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13\": container with ID starting with d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13 not found: ID does not exist" containerID="d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13" 
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.332208 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13"} err="failed to get container status \"d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13\": rpc error: code = NotFound desc = could not find container \"d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13\": container with ID starting with d1081bc95f4aeee2bcbf3ce3f83a016ffb55797b42c0e9c7479f360b2d785e13 not found: ID does not exist"
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.332228 4631 scope.go:117] "RemoveContainer" containerID="5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656"
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.344620 4631 scope.go:117] "RemoveContainer" containerID="78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d"
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.356669 4631 scope.go:117] "RemoveContainer" containerID="5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270"
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.368811 4631 scope.go:117] "RemoveContainer" containerID="5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656"
Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.369158 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656\": container with ID starting with 5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656 not found: ID does not exist" containerID="5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656"
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.369189 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656"} err="failed to get container status \"5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656\": rpc error: code = NotFound desc = could not find container \"5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656\": container with ID starting with 5834b5d86bc384a28a03efac9134221387381043b1214ac8d8c288fea0abb656 not found: ID does not exist"
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.369210 4631 scope.go:117] "RemoveContainer" containerID="78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d"
Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.369534 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d\": container with ID starting with 78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d not found: ID does not exist" containerID="78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d"
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.369556 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d"} err="failed to get container status \"78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d\": rpc error: code = NotFound desc = could not find container \"78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d\": container with ID starting with 78c5729280d02550dd1fff5ae1e513d3a1c91be2911fec1bebd56d4d09d6a50d not found: ID does not exist"
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.369569 4631 scope.go:117] "RemoveContainer" containerID="5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270"
Nov 29 04:17:53 crc kubenswrapper[4631]: E1129 04:17:53.369957 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270\": container with ID starting with 5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270 not found: ID does not exist" containerID="5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270"
Nov 29 04:17:53 crc kubenswrapper[4631]: I1129 04:17:53.369978 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270"} err="failed to get container status \"5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270\": rpc error: code = NotFound desc = could not find container \"5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270\": container with ID starting with 5c9f1b4716345f5c41f10d6a9a27a655d58c2c34dce053b27a811a68f8625270 not found: ID does not exist"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.119489 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-94k48"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.284701 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p5b4b"]
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.284930 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.284948 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.284963 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.284971 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.284984 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.284994 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285004 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285012 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285026 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" containerName="extract-utilities"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285034 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" containerName="extract-utilities"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285045 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerName="extract-content"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285053 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerName="extract-content"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285066 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerName="extract-utilities"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285074 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerName="extract-utilities"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285084 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" containerName="extract-content"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285091 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" containerName="extract-content"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285104 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7485422-4238-4138-9b71-866a1315b330" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285112 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7485422-4238-4138-9b71-866a1315b330" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285123 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerName="extract-utilities"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285133 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerName="extract-utilities"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285144 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerName="extract-content"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285152 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerName="extract-content"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285162 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285170 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285181 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7485422-4238-4138-9b71-866a1315b330" containerName="extract-content"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285189 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7485422-4238-4138-9b71-866a1315b330" containerName="extract-content"
Nov 29 04:17:54 crc kubenswrapper[4631]: E1129 04:17:54.285205 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7485422-4238-4138-9b71-866a1315b330" containerName="extract-utilities"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285213 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7485422-4238-4138-9b71-866a1315b330" containerName="extract-utilities"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285347 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="35e15c22-60f2-4df1-994c-368c65c4987a" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285365 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285375 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dc666e0-e138-4b85-9ecc-d6af453cdc05" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285389 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7485422-4238-4138-9b71-866a1315b330" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285400 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="4441569d-edba-4636-b54b-fcfc59f1cd3f" containerName="registry-server"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.285618 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="dba614c7-ceae-4ce5-afb6-6d082156f640" containerName="marketplace-operator"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.286253 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.288326 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.294032 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p5b4b"]
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.385295 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e28fdaa-deb3-42c8-8b84-0feac17ca652-utilities\") pod \"certified-operators-p5b4b\" (UID: \"1e28fdaa-deb3-42c8-8b84-0feac17ca652\") " pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.385375 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5k9c\" (UniqueName: \"kubernetes.io/projected/1e28fdaa-deb3-42c8-8b84-0feac17ca652-kube-api-access-w5k9c\") pod \"certified-operators-p5b4b\" (UID: \"1e28fdaa-deb3-42c8-8b84-0feac17ca652\") " pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.385404 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e28fdaa-deb3-42c8-8b84-0feac17ca652-catalog-content\") pod \"certified-operators-p5b4b\" (UID: \"1e28fdaa-deb3-42c8-8b84-0feac17ca652\") " pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.486885 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e28fdaa-deb3-42c8-8b84-0feac17ca652-utilities\") pod \"certified-operators-p5b4b\" (UID: \"1e28fdaa-deb3-42c8-8b84-0feac17ca652\") " pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.492767 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mzpdn"]
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.493387 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e28fdaa-deb3-42c8-8b84-0feac17ca652-utilities\") pod \"certified-operators-p5b4b\" (UID: \"1e28fdaa-deb3-42c8-8b84-0feac17ca652\") " pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.493557 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5k9c\" (UniqueName: \"kubernetes.io/projected/1e28fdaa-deb3-42c8-8b84-0feac17ca652-kube-api-access-w5k9c\") pod \"certified-operators-p5b4b\" (UID: \"1e28fdaa-deb3-42c8-8b84-0feac17ca652\") " pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.493621 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.493707 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e28fdaa-deb3-42c8-8b84-0feac17ca652-catalog-content\") pod \"certified-operators-p5b4b\" (UID: \"1e28fdaa-deb3-42c8-8b84-0feac17ca652\") " pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.494068 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e28fdaa-deb3-42c8-8b84-0feac17ca652-catalog-content\") pod \"certified-operators-p5b4b\" (UID: \"1e28fdaa-deb3-42c8-8b84-0feac17ca652\") " pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.496267 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.505434 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mzpdn"]
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.525446 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5k9c\" (UniqueName: \"kubernetes.io/projected/1e28fdaa-deb3-42c8-8b84-0feac17ca652-kube-api-access-w5k9c\") pod \"certified-operators-p5b4b\" (UID: \"1e28fdaa-deb3-42c8-8b84-0feac17ca652\") " pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.594538 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnszq\" (UniqueName: \"kubernetes.io/projected/8e16e7df-c41e-47e9-a5c5-2e6af04decf7-kube-api-access-xnszq\") pod \"redhat-marketplace-mzpdn\" (UID: \"8e16e7df-c41e-47e9-a5c5-2e6af04decf7\") " pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.594831 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e16e7df-c41e-47e9-a5c5-2e6af04decf7-utilities\") pod \"redhat-marketplace-mzpdn\" (UID: \"8e16e7df-c41e-47e9-a5c5-2e6af04decf7\") " pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.594906 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e16e7df-c41e-47e9-a5c5-2e6af04decf7-catalog-content\") pod \"redhat-marketplace-mzpdn\" (UID: \"8e16e7df-c41e-47e9-a5c5-2e6af04decf7\") " pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.617249 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p5b4b"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.695542 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e16e7df-c41e-47e9-a5c5-2e6af04decf7-utilities\") pod \"redhat-marketplace-mzpdn\" (UID: \"8e16e7df-c41e-47e9-a5c5-2e6af04decf7\") " pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.695583 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e16e7df-c41e-47e9-a5c5-2e6af04decf7-catalog-content\") pod \"redhat-marketplace-mzpdn\" (UID: \"8e16e7df-c41e-47e9-a5c5-2e6af04decf7\") " pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.695606 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnszq\" (UniqueName: \"kubernetes.io/projected/8e16e7df-c41e-47e9-a5c5-2e6af04decf7-kube-api-access-xnszq\") pod \"redhat-marketplace-mzpdn\" (UID: \"8e16e7df-c41e-47e9-a5c5-2e6af04decf7\") " pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.696261 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e16e7df-c41e-47e9-a5c5-2e6af04decf7-utilities\") pod \"redhat-marketplace-mzpdn\" (UID: \"8e16e7df-c41e-47e9-a5c5-2e6af04decf7\") " pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.696500 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e16e7df-c41e-47e9-a5c5-2e6af04decf7-catalog-content\") pod \"redhat-marketplace-mzpdn\" (UID: \"8e16e7df-c41e-47e9-a5c5-2e6af04decf7\") " pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.711295 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnszq\" (UniqueName: \"kubernetes.io/projected/8e16e7df-c41e-47e9-a5c5-2e6af04decf7-kube-api-access-xnszq\") pod \"redhat-marketplace-mzpdn\" (UID: \"8e16e7df-c41e-47e9-a5c5-2e6af04decf7\") " pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:54 crc kubenswrapper[4631]: I1129 04:17:54.811837 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mzpdn"
Nov 29 04:17:55 crc kubenswrapper[4631]: I1129 04:17:55.034005 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p5b4b"]
Nov 29 04:17:55 crc kubenswrapper[4631]: W1129 04:17:55.040838 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e28fdaa_deb3_42c8_8b84_0feac17ca652.slice/crio-c54f8e3162db67be378845116abf17076c598a2d90c32d3c67573981cd85b89c WatchSource:0}: Error finding container c54f8e3162db67be378845116abf17076c598a2d90c32d3c67573981cd85b89c: Status 404 returned error can't find the container with id c54f8e3162db67be378845116abf17076c598a2d90c32d3c67573981cd85b89c
Nov 29 04:17:55 crc kubenswrapper[4631]: I1129 04:17:55.135319 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5b4b" event={"ID":"1e28fdaa-deb3-42c8-8b84-0feac17ca652","Type":"ContainerStarted","Data":"c54f8e3162db67be378845116abf17076c598a2d90c32d3c67573981cd85b89c"}
Nov 29 04:17:55 crc kubenswrapper[4631]: I1129 04:17:55.183801 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mzpdn"]
Nov 29 04:17:55 crc kubenswrapper[4631]: W1129 04:17:55.198771 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e16e7df_c41e_47e9_a5c5_2e6af04decf7.slice/crio-be1cd269dc234a4b6645907a187597cd5ebc996905ade18c65df838df477baae WatchSource:0}: Error finding container be1cd269dc234a4b6645907a187597cd5ebc996905ade18c65df838df477baae: Status 404 returned error can't find the container with id be1cd269dc234a4b6645907a187597cd5ebc996905ade18c65df838df477baae
Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.140974 4631 generic.go:334] "Generic (PLEG): container finished" podID="1e28fdaa-deb3-42c8-8b84-0feac17ca652" containerID="824ca8b5272467c33dfc40e82eae6c58b2c375d6f4186feb8574acfeee9bf49d" exitCode=0
Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.141021 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5b4b" event={"ID":"1e28fdaa-deb3-42c8-8b84-0feac17ca652","Type":"ContainerDied","Data":"824ca8b5272467c33dfc40e82eae6c58b2c375d6f4186feb8574acfeee9bf49d"}
Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.142102 4631 generic.go:334] "Generic (PLEG): container finished" podID="8e16e7df-c41e-47e9-a5c5-2e6af04decf7" containerID="c439bb41ad6ec4293c6f2fb54891c0a28b303926f0eaa211426bbfc539682caa" exitCode=0
Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.142151 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzpdn" event={"ID":"8e16e7df-c41e-47e9-a5c5-2e6af04decf7","Type":"ContainerDied","Data":"c439bb41ad6ec4293c6f2fb54891c0a28b303926f0eaa211426bbfc539682caa"}
Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.142184 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzpdn" event={"ID":"8e16e7df-c41e-47e9-a5c5-2e6af04decf7","Type":"ContainerStarted","Data":"be1cd269dc234a4b6645907a187597cd5ebc996905ade18c65df838df477baae"}
Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.686599 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vrqjc"]
Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.689263 4631 util.go:30] "No
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.699750 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.712666 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vrqjc"] Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.823454 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfglh\" (UniqueName: \"kubernetes.io/projected/1a5eb073-d0f6-4980-b381-dfa3d6cab81e-kube-api-access-kfglh\") pod \"redhat-operators-vrqjc\" (UID: \"1a5eb073-d0f6-4980-b381-dfa3d6cab81e\") " pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.823501 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a5eb073-d0f6-4980-b381-dfa3d6cab81e-utilities\") pod \"redhat-operators-vrqjc\" (UID: \"1a5eb073-d0f6-4980-b381-dfa3d6cab81e\") " pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.823543 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a5eb073-d0f6-4980-b381-dfa3d6cab81e-catalog-content\") pod \"redhat-operators-vrqjc\" (UID: \"1a5eb073-d0f6-4980-b381-dfa3d6cab81e\") " pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.892806 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5gtpk"] Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.893801 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.900609 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.904161 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5gtpk"] Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.924454 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-utilities\") pod \"community-operators-5gtpk\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.924507 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a5eb073-d0f6-4980-b381-dfa3d6cab81e-catalog-content\") pod \"redhat-operators-vrqjc\" (UID: \"1a5eb073-d0f6-4980-b381-dfa3d6cab81e\") " pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.924594 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-catalog-content\") pod \"community-operators-5gtpk\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.924628 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hddsx\" (UniqueName: \"kubernetes.io/projected/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-kube-api-access-hddsx\") pod \"community-operators-5gtpk\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.924656 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfglh\" (UniqueName: \"kubernetes.io/projected/1a5eb073-d0f6-4980-b381-dfa3d6cab81e-kube-api-access-kfglh\") pod \"redhat-operators-vrqjc\" (UID: \"1a5eb073-d0f6-4980-b381-dfa3d6cab81e\") " pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.924683 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a5eb073-d0f6-4980-b381-dfa3d6cab81e-utilities\") pod \"redhat-operators-vrqjc\" (UID: \"1a5eb073-d0f6-4980-b381-dfa3d6cab81e\") " pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.925126 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a5eb073-d0f6-4980-b381-dfa3d6cab81e-utilities\") pod \"redhat-operators-vrqjc\" (UID: \"1a5eb073-d0f6-4980-b381-dfa3d6cab81e\") " pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.925227 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a5eb073-d0f6-4980-b381-dfa3d6cab81e-catalog-content\") pod \"redhat-operators-vrqjc\" (UID: 
\"1a5eb073-d0f6-4980-b381-dfa3d6cab81e\") " pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:56 crc kubenswrapper[4631]: I1129 04:17:56.951876 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfglh\" (UniqueName: \"kubernetes.io/projected/1a5eb073-d0f6-4980-b381-dfa3d6cab81e-kube-api-access-kfglh\") pod \"redhat-operators-vrqjc\" (UID: \"1a5eb073-d0f6-4980-b381-dfa3d6cab81e\") " pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.026431 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-utilities\") pod \"community-operators-5gtpk\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.026508 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-catalog-content\") pod \"community-operators-5gtpk\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.026538 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hddsx\" (UniqueName: \"kubernetes.io/projected/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-kube-api-access-hddsx\") pod \"community-operators-5gtpk\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.026925 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-catalog-content\") pod \"community-operators-5gtpk\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.027138 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-utilities\") pod \"community-operators-5gtpk\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.042368 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hddsx\" (UniqueName: \"kubernetes.io/projected/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-kube-api-access-hddsx\") pod \"community-operators-5gtpk\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.055518 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.218564 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.477466 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vrqjc"] Nov 29 04:17:57 crc kubenswrapper[4631]: W1129 04:17:57.482473 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a5eb073_d0f6_4980_b381_dfa3d6cab81e.slice/crio-92f3c120157c34c71abfb0682777edb38b915225d4af23646c399c7426c0bfc3 WatchSource:0}: Error finding container 92f3c120157c34c71abfb0682777edb38b915225d4af23646c399c7426c0bfc3: Status 404 returned error can't find the container with id 92f3c120157c34c71abfb0682777edb38b915225d4af23646c399c7426c0bfc3 Nov 29 04:17:57 crc kubenswrapper[4631]: I1129 04:17:57.636703 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5gtpk"] Nov 29 04:17:57 crc kubenswrapper[4631]: W1129 04:17:57.675582 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f2dce73_ee2d_4566_bdc2_e5daf964ca77.slice/crio-d4fc6bbd4dc9cdb4c0ebde132c730af4e5d5d94b63fc62f9329b1c4d61c1f9b8 WatchSource:0}: Error finding container d4fc6bbd4dc9cdb4c0ebde132c730af4e5d5d94b63fc62f9329b1c4d61c1f9b8: Status 404 returned error can't find the container with id d4fc6bbd4dc9cdb4c0ebde132c730af4e5d5d94b63fc62f9329b1c4d61c1f9b8 Nov 29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.153702 4631 generic.go:334] "Generic (PLEG): container finished" podID="8e16e7df-c41e-47e9-a5c5-2e6af04decf7" containerID="e7aa07fbff3430b9ed23750844c0ee45827beec844dd542a7a905d3e9e7ab229" exitCode=0 Nov 29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.153758 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzpdn" event={"ID":"8e16e7df-c41e-47e9-a5c5-2e6af04decf7","Type":"ContainerDied","Data":"e7aa07fbff3430b9ed23750844c0ee45827beec844dd542a7a905d3e9e7ab229"} Nov 29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.155866 4631 generic.go:334] "Generic (PLEG): container finished" podID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerID="c81274a7158ac0505b263d6ffdaca587dbaa723f742d5e8bc09b541c02132ece" exitCode=0 Nov 29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.155940 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5gtpk" event={"ID":"9f2dce73-ee2d-4566-bdc2-e5daf964ca77","Type":"ContainerDied","Data":"c81274a7158ac0505b263d6ffdaca587dbaa723f742d5e8bc09b541c02132ece"} Nov 29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.155969 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5gtpk" event={"ID":"9f2dce73-ee2d-4566-bdc2-e5daf964ca77","Type":"ContainerStarted","Data":"d4fc6bbd4dc9cdb4c0ebde132c730af4e5d5d94b63fc62f9329b1c4d61c1f9b8"} Nov 29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.158763 4631 generic.go:334] "Generic (PLEG): container finished" podID="1a5eb073-d0f6-4980-b381-dfa3d6cab81e" containerID="4695952a2919f5f53b6fa4228d5b7e8e53d5b8090ca933c2139bca35c5ea0353" exitCode=0 Nov 29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.158829 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjc" event={"ID":"1a5eb073-d0f6-4980-b381-dfa3d6cab81e","Type":"ContainerDied","Data":"4695952a2919f5f53b6fa4228d5b7e8e53d5b8090ca933c2139bca35c5ea0353"} Nov 
29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.158845 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjc" event={"ID":"1a5eb073-d0f6-4980-b381-dfa3d6cab81e","Type":"ContainerStarted","Data":"92f3c120157c34c71abfb0682777edb38b915225d4af23646c399c7426c0bfc3"} Nov 29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.160406 4631 generic.go:334] "Generic (PLEG): container finished" podID="1e28fdaa-deb3-42c8-8b84-0feac17ca652" containerID="897eb9a06025fe7e25540189a6969a967a0a6235ba1f3985f0541c6de9fb04d7" exitCode=0 Nov 29 04:17:58 crc kubenswrapper[4631]: I1129 04:17:58.160423 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5b4b" event={"ID":"1e28fdaa-deb3-42c8-8b84-0feac17ca652","Type":"ContainerDied","Data":"897eb9a06025fe7e25540189a6969a967a0a6235ba1f3985f0541c6de9fb04d7"} Nov 29 04:17:59 crc kubenswrapper[4631]: I1129 04:17:59.167680 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjc" event={"ID":"1a5eb073-d0f6-4980-b381-dfa3d6cab81e","Type":"ContainerStarted","Data":"aa33467925d726946e5bb72d492e210c06d189dfe5afa5f4a55df80d6215d799"} Nov 29 04:17:59 crc kubenswrapper[4631]: I1129 04:17:59.170563 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p5b4b" event={"ID":"1e28fdaa-deb3-42c8-8b84-0feac17ca652","Type":"ContainerStarted","Data":"00bf6e3ff4797ddcfdb7be9378bb771a2f7823176e593a1d2c28a6dab68bc3fd"} Nov 29 04:17:59 crc kubenswrapper[4631]: I1129 04:17:59.172619 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mzpdn" event={"ID":"8e16e7df-c41e-47e9-a5c5-2e6af04decf7","Type":"ContainerStarted","Data":"835a10bfc7e5dd7a1e37f4a16ce7ff97bd316a7647daa137fada4875f54cc3e1"} Nov 29 04:17:59 crc kubenswrapper[4631]: I1129 04:17:59.174768 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5gtpk" event={"ID":"9f2dce73-ee2d-4566-bdc2-e5daf964ca77","Type":"ContainerStarted","Data":"f71ab882924f8d98bb9a4d23877ba3bdded0e7032c8b5f7a67f70b3125626613"} Nov 29 04:17:59 crc kubenswrapper[4631]: I1129 04:17:59.245827 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mzpdn" podStartSLOduration=2.74051963 podStartE2EDuration="5.245809406s" podCreationTimestamp="2025-11-29 04:17:54 +0000 UTC" firstStartedPulling="2025-11-29 04:17:56.146064791 +0000 UTC m=+403.210568315" lastFinishedPulling="2025-11-29 04:17:58.651354557 +0000 UTC m=+405.715858091" observedRunningTime="2025-11-29 04:17:59.243130519 +0000 UTC m=+406.307634043" watchObservedRunningTime="2025-11-29 04:17:59.245809406 +0000 UTC m=+406.310312910" Nov 29 04:17:59 crc kubenswrapper[4631]: I1129 04:17:59.246952 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-p5b4b" podStartSLOduration=2.7471259740000002 podStartE2EDuration="5.246948084s" podCreationTimestamp="2025-11-29 04:17:54 +0000 UTC" firstStartedPulling="2025-11-29 04:17:56.142492072 +0000 UTC m=+403.206995596" lastFinishedPulling="2025-11-29 04:17:58.642314162 +0000 UTC m=+405.706817706" observedRunningTime="2025-11-29 04:17:59.229270114 +0000 UTC m=+406.293773628" watchObservedRunningTime="2025-11-29 04:17:59.246948084 +0000 UTC m=+406.311451598" Nov 29 04:18:00 crc kubenswrapper[4631]: I1129 04:18:00.192897 4631 generic.go:334] "Generic (PLEG): 
container finished" podID="1a5eb073-d0f6-4980-b381-dfa3d6cab81e" containerID="aa33467925d726946e5bb72d492e210c06d189dfe5afa5f4a55df80d6215d799" exitCode=0 Nov 29 04:18:00 crc kubenswrapper[4631]: I1129 04:18:00.193560 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjc" event={"ID":"1a5eb073-d0f6-4980-b381-dfa3d6cab81e","Type":"ContainerDied","Data":"aa33467925d726946e5bb72d492e210c06d189dfe5afa5f4a55df80d6215d799"} Nov 29 04:18:00 crc kubenswrapper[4631]: I1129 04:18:00.198941 4631 generic.go:334] "Generic (PLEG): container finished" podID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerID="f71ab882924f8d98bb9a4d23877ba3bdded0e7032c8b5f7a67f70b3125626613" exitCode=0 Nov 29 04:18:00 crc kubenswrapper[4631]: I1129 04:18:00.199007 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5gtpk" event={"ID":"9f2dce73-ee2d-4566-bdc2-e5daf964ca77","Type":"ContainerDied","Data":"f71ab882924f8d98bb9a4d23877ba3bdded0e7032c8b5f7a67f70b3125626613"} Nov 29 04:18:01 crc kubenswrapper[4631]: I1129 04:18:01.104027 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" podUID="171d32d8-1dcb-497d-9724-d798414c5602" containerName="registry" containerID="cri-o://f3a77794fb6b523a46952be09bac56829ce220363132612ae2381218692b0788" gracePeriod=30 Nov 29 04:18:01 crc kubenswrapper[4631]: I1129 04:18:01.205399 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjc" event={"ID":"1a5eb073-d0f6-4980-b381-dfa3d6cab81e","Type":"ContainerStarted","Data":"499cfdbde4b54082503790d07d58e6ed5584b0fbec0b38fb27c83b411505620f"} Nov 29 04:18:01 crc kubenswrapper[4631]: I1129 04:18:01.207723 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5gtpk" event={"ID":"9f2dce73-ee2d-4566-bdc2-e5daf964ca77","Type":"ContainerStarted","Data":"fc383deb83a72fdbc9a3b4653d39c74a1dbc133069ed04527a2ee87c429aa4a0"} Nov 29 04:18:01 crc kubenswrapper[4631]: I1129 04:18:01.274377 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vrqjc" podStartSLOduration=2.79935524 podStartE2EDuration="5.274362733s" podCreationTimestamp="2025-11-29 04:17:56 +0000 UTC" firstStartedPulling="2025-11-29 04:17:58.16107458 +0000 UTC m=+405.225578094" lastFinishedPulling="2025-11-29 04:18:00.636082073 +0000 UTC m=+407.700585587" observedRunningTime="2025-11-29 04:18:01.225476596 +0000 UTC m=+408.289980100" watchObservedRunningTime="2025-11-29 04:18:01.274362733 +0000 UTC m=+408.338866247" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.214103 4631 generic.go:334] "Generic (PLEG): container finished" podID="171d32d8-1dcb-497d-9724-d798414c5602" containerID="f3a77794fb6b523a46952be09bac56829ce220363132612ae2381218692b0788" exitCode=0 Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.214191 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" event={"ID":"171d32d8-1dcb-497d-9724-d798414c5602","Type":"ContainerDied","Data":"f3a77794fb6b523a46952be09bac56829ce220363132612ae2381218692b0788"} Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.214607 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" 
event={"ID":"171d32d8-1dcb-497d-9724-d798414c5602","Type":"ContainerDied","Data":"3f10412e3248c4e07707f7ae1bb83cc7dbf20c2df1f059168171e1121fa4a798"} Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.214640 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f10412e3248c4e07707f7ae1bb83cc7dbf20c2df1f059168171e1121fa4a798" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.246812 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.262548 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5gtpk" podStartSLOduration=3.686015786 podStartE2EDuration="6.262525925s" podCreationTimestamp="2025-11-29 04:17:56 +0000 UTC" firstStartedPulling="2025-11-29 04:17:58.157296106 +0000 UTC m=+405.221799620" lastFinishedPulling="2025-11-29 04:18:00.733806245 +0000 UTC m=+407.798309759" observedRunningTime="2025-11-29 04:18:01.287211942 +0000 UTC m=+408.351715466" watchObservedRunningTime="2025-11-29 04:18:02.262525925 +0000 UTC m=+409.327029439" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.416669 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/171d32d8-1dcb-497d-9724-d798414c5602-installation-pull-secrets\") pod \"171d32d8-1dcb-497d-9724-d798414c5602\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.416713 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/171d32d8-1dcb-497d-9724-d798414c5602-ca-trust-extracted\") pod \"171d32d8-1dcb-497d-9724-d798414c5602\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.416732 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-registry-certificates\") pod \"171d32d8-1dcb-497d-9724-d798414c5602\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.416767 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7fg2\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-kube-api-access-m7fg2\") pod \"171d32d8-1dcb-497d-9724-d798414c5602\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.416808 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-bound-sa-token\") pod \"171d32d8-1dcb-497d-9724-d798414c5602\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.416961 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"171d32d8-1dcb-497d-9724-d798414c5602\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.417010 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-trusted-ca\") pod \"171d32d8-1dcb-497d-9724-d798414c5602\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.417053 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-registry-tls\") pod \"171d32d8-1dcb-497d-9724-d798414c5602\" (UID: \"171d32d8-1dcb-497d-9724-d798414c5602\") " Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.417583 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "171d32d8-1dcb-497d-9724-d798414c5602" (UID: "171d32d8-1dcb-497d-9724-d798414c5602"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.417811 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "171d32d8-1dcb-497d-9724-d798414c5602" (UID: "171d32d8-1dcb-497d-9724-d798414c5602"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.428155 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-kube-api-access-m7fg2" (OuterVolumeSpecName: "kube-api-access-m7fg2") pod "171d32d8-1dcb-497d-9724-d798414c5602" (UID: "171d32d8-1dcb-497d-9724-d798414c5602"). InnerVolumeSpecName "kube-api-access-m7fg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.428693 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "171d32d8-1dcb-497d-9724-d798414c5602" (UID: "171d32d8-1dcb-497d-9724-d798414c5602"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.430696 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "171d32d8-1dcb-497d-9724-d798414c5602" (UID: "171d32d8-1dcb-497d-9724-d798414c5602"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.431120 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "171d32d8-1dcb-497d-9724-d798414c5602" (UID: "171d32d8-1dcb-497d-9724-d798414c5602"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.436558 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/171d32d8-1dcb-497d-9724-d798414c5602-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "171d32d8-1dcb-497d-9724-d798414c5602" (UID: "171d32d8-1dcb-497d-9724-d798414c5602"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.444030 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/171d32d8-1dcb-497d-9724-d798414c5602-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "171d32d8-1dcb-497d-9724-d798414c5602" (UID: "171d32d8-1dcb-497d-9724-d798414c5602"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.518406 4631 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.518652 4631 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/171d32d8-1dcb-497d-9724-d798414c5602-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.518717 4631 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/171d32d8-1dcb-497d-9724-d798414c5602-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.518786 4631 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.518845 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7fg2\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-kube-api-access-m7fg2\") on node \"crc\" DevicePath \"\"" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.518900 4631 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/171d32d8-1dcb-497d-9724-d798414c5602-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 29 04:18:02 crc kubenswrapper[4631]: I1129 04:18:02.519221 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/171d32d8-1dcb-497d-9724-d798414c5602-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:18:03 crc kubenswrapper[4631]: I1129 04:18:03.222445 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8h4ns" Nov 29 04:18:03 crc kubenswrapper[4631]: I1129 04:18:03.253024 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8h4ns"] Nov 29 04:18:03 crc kubenswrapper[4631]: I1129 04:18:03.259208 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8h4ns"] Nov 29 04:18:04 crc kubenswrapper[4631]: I1129 04:18:04.618442 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p5b4b" Nov 29 04:18:04 crc kubenswrapper[4631]: I1129 04:18:04.619312 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p5b4b" Nov 29 04:18:04 crc kubenswrapper[4631]: I1129 04:18:04.666167 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p5b4b" Nov 29 04:18:04 crc kubenswrapper[4631]: I1129 04:18:04.812864 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mzpdn" Nov 29 04:18:04 crc kubenswrapper[4631]: I1129 04:18:04.820457 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mzpdn" Nov 29 04:18:04 crc kubenswrapper[4631]: I1129 04:18:04.871106 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mzpdn" Nov 29 04:18:05 crc kubenswrapper[4631]: I1129 04:18:05.221786 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="171d32d8-1dcb-497d-9724-d798414c5602" path="/var/lib/kubelet/pods/171d32d8-1dcb-497d-9724-d798414c5602/volumes" Nov 29 04:18:05 crc kubenswrapper[4631]: I1129 04:18:05.271426 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p5b4b" Nov 29 04:18:05 crc kubenswrapper[4631]: I1129 04:18:05.278247 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mzpdn" Nov 29 04:18:07 crc kubenswrapper[4631]: I1129 04:18:07.056307 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:18:07 crc kubenswrapper[4631]: I1129 04:18:07.056802 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:18:07 crc kubenswrapper[4631]: I1129 04:18:07.117006 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:18:07 crc kubenswrapper[4631]: I1129 04:18:07.226519 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:18:07 crc kubenswrapper[4631]: I1129 04:18:07.226555 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:18:07 crc kubenswrapper[4631]: I1129 04:18:07.286197 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:18:07 crc kubenswrapper[4631]: I1129 04:18:07.308978 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vrqjc" Nov 29 04:18:07 
crc kubenswrapper[4631]: I1129 04:18:07.338025 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:18:20 crc kubenswrapper[4631]: I1129 04:18:20.716604 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:18:20 crc kubenswrapper[4631]: I1129 04:18:20.717597 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:18:20 crc kubenswrapper[4631]: I1129 04:18:20.717692 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:18:20 crc kubenswrapper[4631]: I1129 04:18:20.719913 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5d3a08db8fc9bdeb8d65723a4db4ac13f3423befc007a14c169afc04a3465ddc"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 04:18:20 crc kubenswrapper[4631]: I1129 04:18:20.720035 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://5d3a08db8fc9bdeb8d65723a4db4ac13f3423befc007a14c169afc04a3465ddc" gracePeriod=600 Nov 29 04:18:22 crc kubenswrapper[4631]: I1129 04:18:22.333034 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="5d3a08db8fc9bdeb8d65723a4db4ac13f3423befc007a14c169afc04a3465ddc" exitCode=0 Nov 29 04:18:22 crc kubenswrapper[4631]: I1129 04:18:22.333144 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"5d3a08db8fc9bdeb8d65723a4db4ac13f3423befc007a14c169afc04a3465ddc"} Nov 29 04:18:22 crc kubenswrapper[4631]: I1129 04:18:22.333544 4631 scope.go:117] "RemoveContainer" containerID="97bec63e59625a804c0f4e5e8befb9612641fc1601a3b3f02b0608cfbb3de557" Nov 29 04:18:23 crc kubenswrapper[4631]: I1129 04:18:23.342931 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"f92a4ea4c8fc751f84f3dd0318393a90ca37c524f672eccb6931f3b74e9f254a"} Nov 29 04:20:13 crc kubenswrapper[4631]: I1129 04:20:13.425196 4631 scope.go:117] "RemoveContainer" containerID="f3a77794fb6b523a46952be09bac56829ce220363132612ae2381218692b0788" Nov 29 04:20:50 crc kubenswrapper[4631]: I1129 04:20:50.716292 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" start-of-body= Nov 29 04:20:50 crc kubenswrapper[4631]: I1129 04:20:50.717020 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:21:20 crc kubenswrapper[4631]: I1129 04:21:20.716499 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:21:20 crc kubenswrapper[4631]: I1129 04:21:20.717230 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:21:50 crc kubenswrapper[4631]: I1129 04:21:50.716157 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:21:50 crc kubenswrapper[4631]: I1129 04:21:50.717045 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:21:50 crc kubenswrapper[4631]: I1129 04:21:50.717115 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:21:50 crc kubenswrapper[4631]: I1129 04:21:50.717939 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f92a4ea4c8fc751f84f3dd0318393a90ca37c524f672eccb6931f3b74e9f254a"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 04:21:50 crc kubenswrapper[4631]: I1129 04:21:50.718033 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://f92a4ea4c8fc751f84f3dd0318393a90ca37c524f672eccb6931f3b74e9f254a" gracePeriod=600 Nov 29 04:21:51 crc kubenswrapper[4631]: I1129 04:21:51.824227 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="f92a4ea4c8fc751f84f3dd0318393a90ca37c524f672eccb6931f3b74e9f254a" exitCode=0 Nov 29 04:21:51 crc kubenswrapper[4631]: I1129 04:21:51.824350 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" 
event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"f92a4ea4c8fc751f84f3dd0318393a90ca37c524f672eccb6931f3b74e9f254a"} Nov 29 04:21:51 crc kubenswrapper[4631]: I1129 04:21:51.824849 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"d24bc233b5493c7d82c41dde646e52c2ccbd2abaf110835404b67654167e1ec2"} Nov 29 04:21:51 crc kubenswrapper[4631]: I1129 04:21:51.824878 4631 scope.go:117] "RemoveContainer" containerID="5d3a08db8fc9bdeb8d65723a4db4ac13f3423befc007a14c169afc04a3465ddc" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.850498 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-n4mtg"] Nov 29 04:23:02 crc kubenswrapper[4631]: E1129 04:23:02.851290 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="171d32d8-1dcb-497d-9724-d798414c5602" containerName="registry" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.851303 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="171d32d8-1dcb-497d-9724-d798414c5602" containerName="registry" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.851429 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="171d32d8-1dcb-497d-9724-d798414c5602" containerName="registry" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.851835 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-n4mtg" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.869715 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.870060 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.870201 4631 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-dzzfn" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.870655 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-5gqxn"] Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.871357 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-5gqxn" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.874731 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-n4mtg"] Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.876853 4631 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-99pkq" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.886617 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-66b6l"] Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.887511 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.890249 4631 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-mwwbz" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.894253 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-5gqxn"] Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.914455 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-66b6l"] Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.941197 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-462sx\" (UniqueName: \"kubernetes.io/projected/9f4942a9-5091-4ae9-b7ba-9e9aa329161f-kube-api-access-462sx\") pod \"cert-manager-cainjector-7f985d654d-n4mtg\" (UID: \"9f4942a9-5091-4ae9-b7ba-9e9aa329161f\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-n4mtg" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.941285 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhw79\" (UniqueName: \"kubernetes.io/projected/1cff1f89-8342-4b11-98a6-c6d2cb2bed76-kube-api-access-zhw79\") pod \"cert-manager-webhook-5655c58dd6-66b6l\" (UID: \"1cff1f89-8342-4b11-98a6-c6d2cb2bed76\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" Nov 29 04:23:02 crc kubenswrapper[4631]: I1129 04:23:02.941430 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvlzp\" (UniqueName: \"kubernetes.io/projected/82882099-8669-4d95-a03f-7da7a69b3865-kube-api-access-wvlzp\") pod \"cert-manager-5b446d88c5-5gqxn\" (UID: \"82882099-8669-4d95-a03f-7da7a69b3865\") " pod="cert-manager/cert-manager-5b446d88c5-5gqxn" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.042697 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-462sx\" (UniqueName: \"kubernetes.io/projected/9f4942a9-5091-4ae9-b7ba-9e9aa329161f-kube-api-access-462sx\") pod \"cert-manager-cainjector-7f985d654d-n4mtg\" (UID: \"9f4942a9-5091-4ae9-b7ba-9e9aa329161f\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-n4mtg" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.042752 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhw79\" (UniqueName: \"kubernetes.io/projected/1cff1f89-8342-4b11-98a6-c6d2cb2bed76-kube-api-access-zhw79\") pod \"cert-manager-webhook-5655c58dd6-66b6l\" (UID: \"1cff1f89-8342-4b11-98a6-c6d2cb2bed76\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.042787 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvlzp\" (UniqueName: \"kubernetes.io/projected/82882099-8669-4d95-a03f-7da7a69b3865-kube-api-access-wvlzp\") pod \"cert-manager-5b446d88c5-5gqxn\" (UID: \"82882099-8669-4d95-a03f-7da7a69b3865\") " pod="cert-manager/cert-manager-5b446d88c5-5gqxn" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.064501 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvlzp\" (UniqueName: \"kubernetes.io/projected/82882099-8669-4d95-a03f-7da7a69b3865-kube-api-access-wvlzp\") pod \"cert-manager-5b446d88c5-5gqxn\" (UID: \"82882099-8669-4d95-a03f-7da7a69b3865\") " 
pod="cert-manager/cert-manager-5b446d88c5-5gqxn" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.064970 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhw79\" (UniqueName: \"kubernetes.io/projected/1cff1f89-8342-4b11-98a6-c6d2cb2bed76-kube-api-access-zhw79\") pod \"cert-manager-webhook-5655c58dd6-66b6l\" (UID: \"1cff1f89-8342-4b11-98a6-c6d2cb2bed76\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.065810 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-462sx\" (UniqueName: \"kubernetes.io/projected/9f4942a9-5091-4ae9-b7ba-9e9aa329161f-kube-api-access-462sx\") pod \"cert-manager-cainjector-7f985d654d-n4mtg\" (UID: \"9f4942a9-5091-4ae9-b7ba-9e9aa329161f\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-n4mtg" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.184173 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-n4mtg" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.214438 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-5gqxn" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.223041 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.500282 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-66b6l"] Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.510787 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.635093 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-n4mtg"] Nov 29 04:23:03 crc kubenswrapper[4631]: W1129 04:23:03.640339 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82882099_8669_4d95_a03f_7da7a69b3865.slice/crio-5c640befa04a809bbf43f9ce309a803e91f9913d0f2afe7a6aa9e181685d0b3c WatchSource:0}: Error finding container 5c640befa04a809bbf43f9ce309a803e91f9913d0f2afe7a6aa9e181685d0b3c: Status 404 returned error can't find the container with id 5c640befa04a809bbf43f9ce309a803e91f9913d0f2afe7a6aa9e181685d0b3c Nov 29 04:23:03 crc kubenswrapper[4631]: I1129 04:23:03.640179 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-5gqxn"] Nov 29 04:23:03 crc kubenswrapper[4631]: W1129 04:23:03.641286 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9f4942a9_5091_4ae9_b7ba_9e9aa329161f.slice/crio-38aa7216b689d7dbc18f9d892b2d695c1bc0da8ebb082e312bfd528b2f77bd90 WatchSource:0}: Error finding container 38aa7216b689d7dbc18f9d892b2d695c1bc0da8ebb082e312bfd528b2f77bd90: Status 404 returned error can't find the container with id 38aa7216b689d7dbc18f9d892b2d695c1bc0da8ebb082e312bfd528b2f77bd90 Nov 29 04:23:04 crc kubenswrapper[4631]: I1129 04:23:04.355825 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" 
event={"ID":"1cff1f89-8342-4b11-98a6-c6d2cb2bed76","Type":"ContainerStarted","Data":"6d2dab9be2bb0fa1df5de52cdbb019c40daf297d1967d1f07afc064da0dd69d7"} Nov 29 04:23:04 crc kubenswrapper[4631]: I1129 04:23:04.356673 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-n4mtg" event={"ID":"9f4942a9-5091-4ae9-b7ba-9e9aa329161f","Type":"ContainerStarted","Data":"38aa7216b689d7dbc18f9d892b2d695c1bc0da8ebb082e312bfd528b2f77bd90"} Nov 29 04:23:04 crc kubenswrapper[4631]: I1129 04:23:04.357819 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-5gqxn" event={"ID":"82882099-8669-4d95-a03f-7da7a69b3865","Type":"ContainerStarted","Data":"5c640befa04a809bbf43f9ce309a803e91f9913d0f2afe7a6aa9e181685d0b3c"} Nov 29 04:23:06 crc kubenswrapper[4631]: I1129 04:23:06.376074 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" event={"ID":"1cff1f89-8342-4b11-98a6-c6d2cb2bed76","Type":"ContainerStarted","Data":"9fd2b12b3955c14b8d49b24376b7515f2f2f455337698b218a1ce069fd9c3a02"} Nov 29 04:23:06 crc kubenswrapper[4631]: I1129 04:23:06.376499 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" Nov 29 04:23:06 crc kubenswrapper[4631]: I1129 04:23:06.394962 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" podStartSLOduration=2.23670416 podStartE2EDuration="4.394935454s" podCreationTimestamp="2025-11-29 04:23:02 +0000 UTC" firstStartedPulling="2025-11-29 04:23:03.509980831 +0000 UTC m=+710.574484345" lastFinishedPulling="2025-11-29 04:23:05.668212125 +0000 UTC m=+712.732715639" observedRunningTime="2025-11-29 04:23:06.386458024 +0000 UTC m=+713.450961578" watchObservedRunningTime="2025-11-29 04:23:06.394935454 +0000 UTC m=+713.459438998" Nov 29 04:23:07 crc kubenswrapper[4631]: I1129 04:23:07.382565 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-5gqxn" event={"ID":"82882099-8669-4d95-a03f-7da7a69b3865","Type":"ContainerStarted","Data":"692e6268c3b0cd9e4040e9484243c369147c2bfdb9d9c6f87bc97db3b3590da6"} Nov 29 04:23:07 crc kubenswrapper[4631]: I1129 04:23:07.383868 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-n4mtg" event={"ID":"9f4942a9-5091-4ae9-b7ba-9e9aa329161f","Type":"ContainerStarted","Data":"be27e23b95c001d8437a4365de2776fa79dd362533128e12c1ccc191ad58e144"} Nov 29 04:23:07 crc kubenswrapper[4631]: I1129 04:23:07.408479 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-5gqxn" podStartSLOduration=1.938812717 podStartE2EDuration="5.408271514s" podCreationTimestamp="2025-11-29 04:23:02 +0000 UTC" firstStartedPulling="2025-11-29 04:23:03.642229432 +0000 UTC m=+710.706732946" lastFinishedPulling="2025-11-29 04:23:07.111688229 +0000 UTC m=+714.176191743" observedRunningTime="2025-11-29 04:23:07.39968311 +0000 UTC m=+714.464186634" watchObservedRunningTime="2025-11-29 04:23:07.408271514 +0000 UTC m=+714.472775108" Nov 29 04:23:07 crc kubenswrapper[4631]: I1129 04:23:07.439815 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-n4mtg" podStartSLOduration=1.984248296 podStartE2EDuration="5.439796233s" podCreationTimestamp="2025-11-29 04:23:02 +0000 UTC" 
firstStartedPulling="2025-11-29 04:23:03.643091943 +0000 UTC m=+710.707595457" lastFinishedPulling="2025-11-29 04:23:07.09863988 +0000 UTC m=+714.163143394" observedRunningTime="2025-11-29 04:23:07.42452956 +0000 UTC m=+714.489033074" watchObservedRunningTime="2025-11-29 04:23:07.439796233 +0000 UTC m=+714.504299747" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.230444 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-66b6l" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.439527 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2npl6"] Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.440126 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovn-controller" containerID="cri-o://7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396" gracePeriod=30 Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.440374 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="nbdb" containerID="cri-o://d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" gracePeriod=30 Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.440537 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="northd" containerID="cri-o://2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7" gracePeriod=30 Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.440179 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="sbdb" containerID="cri-o://52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" gracePeriod=30 Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.440590 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b" gracePeriod=30 Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.440745 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kube-rbac-proxy-node" containerID="cri-o://58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b" gracePeriod=30 Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.441077 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovn-acl-logging" containerID="cri-o://ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132" gracePeriod=30 Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.495189 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" 
containerID="cri-o://7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6" gracePeriod=30 Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.503588 4631 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.512766 4631 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.515173 4631 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.517458 4631 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.521851 4631 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.521897 4631 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="nbdb" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.524719 4631 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.524770 4631 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="sbdb" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.783960 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/3.log" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.786596 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovn-acl-logging/0.log" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.787175 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovn-controller/0.log" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.787693 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.870455 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-j5pb9"] Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.870764 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kubecfg-setup" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.870793 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kubecfg-setup" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.870813 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kube-rbac-proxy-node" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.870827 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kube-rbac-proxy-node" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.870848 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.870863 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.870882 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovn-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.870895 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovn-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.870916 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kube-rbac-proxy-ovn-metrics" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.870928 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kube-rbac-proxy-ovn-metrics" Nov 29 04:23:13 crc 
kubenswrapper[4631]: E1129 04:23:13.870946 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="northd" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.870959 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="northd" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.870973 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="nbdb" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.870986 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="nbdb" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.871005 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871019 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.871036 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovn-acl-logging" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871050 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovn-acl-logging" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.871076 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="sbdb" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871089 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="sbdb" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.871104 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871116 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871280 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871297 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871314 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovn-acl-logging" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871366 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="sbdb" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871387 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kube-rbac-proxy-node" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871408 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="kube-rbac-proxy-ovn-metrics" Nov 29 
04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871432 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="northd" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871476 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="nbdb" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871503 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871527 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovn-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.871730 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871748 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871928 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.871947 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: E1129 04:23:13.872118 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.872135 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda25410-78a0-47a1-894f-621a855bd64a" containerName="ovnkube-controller" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.874561 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.902926 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-log-socket\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.902959 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-bin\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.902978 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-netns\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.902994 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-kubelet\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903017 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-var-lib-openvswitch\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903065 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-env-overrides\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903086 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffhfp\" (UniqueName: \"kubernetes.io/projected/cda25410-78a0-47a1-894f-621a855bd64a-kube-api-access-ffhfp\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903103 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-etc-openvswitch\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903117 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-ovn\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903131 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-netd\") pod 
\"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903145 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-config\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903172 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-slash\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903189 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-openvswitch\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903210 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-script-lib\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903230 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-node-log\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903251 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cda25410-78a0-47a1-894f-621a855bd64a-ovn-node-metrics-cert\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903273 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903293 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-ovn-kubernetes\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903413 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-systemd\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903430 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-systemd-units\") pod \"cda25410-78a0-47a1-894f-621a855bd64a\" (UID: \"cda25410-78a0-47a1-894f-621a855bd64a\") " Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903537 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903599 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903627 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-node-log" (OuterVolumeSpecName: "node-log") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903668 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903726 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903778 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903813 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-slash" (OuterVolumeSpecName: "host-slash") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903859 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903892 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-log-socket" (OuterVolumeSpecName: "log-socket") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903923 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903956 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.903989 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904147 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904191 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904500 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904532 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904684 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904816 4631 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904844 4631 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904863 4631 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-log-socket\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904882 4631 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904900 4631 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904918 4631 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904938 4631 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904956 4631 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904974 4631 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-slash\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.904993 4631 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-openvswitch\") on 
node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.905012 4631 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-node-log\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.905030 4631 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.905053 4631 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.908473 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cda25410-78a0-47a1-894f-621a855bd64a-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.910864 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cda25410-78a0-47a1-894f-621a855bd64a-kube-api-access-ffhfp" (OuterVolumeSpecName: "kube-api-access-ffhfp") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "kube-api-access-ffhfp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:23:13 crc kubenswrapper[4631]: I1129 04:23:13.925927 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "cda25410-78a0-47a1-894f-621a855bd64a" (UID: "cda25410-78a0-47a1-894f-621a855bd64a"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006419 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-systemd-units\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006480 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d174cbd1-20f6-44fb-8af5-0f332569cf59-ovnkube-script-lib\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006499 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5fvd\" (UniqueName: \"kubernetes.io/projected/d174cbd1-20f6-44fb-8af5-0f332569cf59-kube-api-access-t5fvd\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006518 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d174cbd1-20f6-44fb-8af5-0f332569cf59-ovn-node-metrics-cert\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006536 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-log-socket\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006641 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-run-ovn-kubernetes\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006694 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-cni-netd\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006741 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-node-log\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006770 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-slash\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006914 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-run-ovn\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.006974 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d174cbd1-20f6-44fb-8af5-0f332569cf59-env-overrides\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007020 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-etc-openvswitch\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007056 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-run-netns\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007099 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-kubelet\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007126 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007197 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-var-lib-openvswitch\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007253 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-run-systemd\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007303 4631 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d174cbd1-20f6-44fb-8af5-0f332569cf59-ovnkube-config\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007404 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-run-openvswitch\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007430 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-cni-bin\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007500 4631 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cda25410-78a0-47a1-894f-621a855bd64a-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007517 4631 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007528 4631 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007538 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffhfp\" (UniqueName: \"kubernetes.io/projected/cda25410-78a0-47a1-894f-621a855bd64a-kube-api-access-ffhfp\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007546 4631 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cda25410-78a0-47a1-894f-621a855bd64a-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007566 4631 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.007575 4631 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cda25410-78a0-47a1-894f-621a855bd64a-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108651 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-etc-openvswitch\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108712 4631 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-run-netns\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108744 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-kubelet\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108799 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108823 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-etc-openvswitch\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108860 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-var-lib-openvswitch\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108909 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-run-systemd\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108928 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-run-netns\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108956 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d174cbd1-20f6-44fb-8af5-0f332569cf59-ovnkube-config\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108967 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-kubelet\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108980 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-run-systemd\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109058 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-run-openvswitch\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109099 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-cni-bin\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109145 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-systemd-units\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108910 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109182 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d174cbd1-20f6-44fb-8af5-0f332569cf59-ovnkube-script-lib\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109217 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5fvd\" (UniqueName: \"kubernetes.io/projected/d174cbd1-20f6-44fb-8af5-0f332569cf59-kube-api-access-t5fvd\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109228 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-run-openvswitch\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.108945 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-var-lib-openvswitch\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109260 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/d174cbd1-20f6-44fb-8af5-0f332569cf59-ovn-node-metrics-cert\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109294 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-cni-bin\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109297 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-log-socket\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109358 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-log-socket\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109398 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-run-ovn-kubernetes\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109408 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-systemd-units\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109432 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-cni-netd\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109485 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-node-log\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109537 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-slash\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109549 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-run-ovn-kubernetes\") pod \"ovnkube-node-j5pb9\" (UID: 
\"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109599 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-slash\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109609 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-run-ovn\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109642 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d174cbd1-20f6-44fb-8af5-0f332569cf59-env-overrides\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109674 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-node-log\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109728 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-run-ovn\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.109644 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d174cbd1-20f6-44fb-8af5-0f332569cf59-host-cni-netd\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.110151 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d174cbd1-20f6-44fb-8af5-0f332569cf59-ovnkube-config\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.110625 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d174cbd1-20f6-44fb-8af5-0f332569cf59-env-overrides\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.110693 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d174cbd1-20f6-44fb-8af5-0f332569cf59-ovnkube-script-lib\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.112902 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d174cbd1-20f6-44fb-8af5-0f332569cf59-ovn-node-metrics-cert\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.136929 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5fvd\" (UniqueName: \"kubernetes.io/projected/d174cbd1-20f6-44fb-8af5-0f332569cf59-kube-api-access-t5fvd\") pod \"ovnkube-node-j5pb9\" (UID: \"d174cbd1-20f6-44fb-8af5-0f332569cf59\") " pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.193843 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.440405 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/2.log" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.442659 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/1.log" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.442725 4631 generic.go:334] "Generic (PLEG): container finished" podID="7f871e13-bbe2-4104-8f40-70e695653fef" containerID="8b098d771c091184a554145d83225c1b8122a63fed63b77f3eaf7d286223380b" exitCode=2 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.442835 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-pbk6b" event={"ID":"7f871e13-bbe2-4104-8f40-70e695653fef","Type":"ContainerDied","Data":"8b098d771c091184a554145d83225c1b8122a63fed63b77f3eaf7d286223380b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.442983 4631 scope.go:117] "RemoveContainer" containerID="73ffb4e6113d3a289afa5e4cb44addbb248d7dfaa993f4524a0c14bafada6614" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.444098 4631 scope.go:117] "RemoveContainer" containerID="8b098d771c091184a554145d83225c1b8122a63fed63b77f3eaf7d286223380b" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.448770 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-pbk6b_openshift-multus(7f871e13-bbe2-4104-8f40-70e695653fef)\"" pod="openshift-multus/multus-pbk6b" podUID="7f871e13-bbe2-4104-8f40-70e695653fef" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.450279 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovnkube-controller/3.log" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.455458 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovn-acl-logging/0.log" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.456303 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2npl6_cda25410-78a0-47a1-894f-621a855bd64a/ovn-controller/0.log" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.456959 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" 
containerID="7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6" exitCode=0 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457001 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" exitCode=0 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457016 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" exitCode=0 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457038 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7" exitCode=0 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457004 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457093 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457114 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457147 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457171 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457191 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457051 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b" exitCode=0 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457236 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b" exitCode=0 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457255 4631 generic.go:334] "Generic (PLEG): container finished" podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132" exitCode=143 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457272 4631 generic.go:334] "Generic (PLEG): container finished" 
podID="cda25410-78a0-47a1-894f-621a855bd64a" containerID="7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396" exitCode=143 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457386 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457409 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457427 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457439 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457450 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457462 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457473 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457483 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457494 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457504 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457515 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457531 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457548 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} Nov 29 04:23:14 crc kubenswrapper[4631]: 
I1129 04:23:14.457561 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457572 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457582 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457594 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457605 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457615 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457625 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457635 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457647 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457662 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457676 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457689 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457700 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457711 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} Nov 29 04:23:14 crc kubenswrapper[4631]: 
I1129 04:23:14.457721 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457731 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457741 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457751 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457775 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457787 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457805 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2npl6" event={"ID":"cda25410-78a0-47a1-894f-621a855bd64a","Type":"ContainerDied","Data":"6f5d544f678345df368ed32c558318be9c0cb311477d41759e7682ddc7cda8f3"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457821 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457835 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457846 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457856 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457867 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457877 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457886 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} Nov 29 04:23:14 crc kubenswrapper[4631]: 
I1129 04:23:14.457896 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457906 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.457916 4631 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.461738 4631 generic.go:334] "Generic (PLEG): container finished" podID="d174cbd1-20f6-44fb-8af5-0f332569cf59" containerID="231c378069e6cb0767cd8de35e8b8a5a99fb5557778524c9e6e71683d0122ffd" exitCode=0 Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.461791 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerDied","Data":"231c378069e6cb0767cd8de35e8b8a5a99fb5557778524c9e6e71683d0122ffd"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.461829 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerStarted","Data":"e407fafa10428f406ff9028f1625cff0ed68bed9c8b7e931effdde5201c78915"} Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.479386 4631 scope.go:117] "RemoveContainer" containerID="7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.518285 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.537176 4631 scope.go:117] "RemoveContainer" containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.547877 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2npl6"] Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.556305 4631 scope.go:117] "RemoveContainer" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.556963 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2npl6"] Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.595772 4631 scope.go:117] "RemoveContainer" containerID="2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.612261 4631 scope.go:117] "RemoveContainer" containerID="d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.626077 4631 scope.go:117] "RemoveContainer" containerID="58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.662708 4631 scope.go:117] "RemoveContainer" containerID="ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.678480 4631 scope.go:117] "RemoveContainer" containerID="7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396" Nov 29 
04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.705872 4631 scope.go:117] "RemoveContainer" containerID="cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.721971 4631 scope.go:117] "RemoveContainer" containerID="7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.722508 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6\": container with ID starting with 7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6 not found: ID does not exist" containerID="7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.722732 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} err="failed to get container status \"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6\": rpc error: code = NotFound desc = could not find container \"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6\": container with ID starting with 7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.722781 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.723174 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\": container with ID starting with dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d not found: ID does not exist" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.723201 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"} err="failed to get container status \"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\": rpc error: code = NotFound desc = could not find container \"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\": container with ID starting with dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.723221 4631 scope.go:117] "RemoveContainer" containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.723604 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\": container with ID starting with 52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9 not found: ID does not exist" containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.723633 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} err="failed to get container 
status \"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\": rpc error: code = NotFound desc = could not find container \"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\": container with ID starting with 52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.723651 4631 scope.go:117] "RemoveContainer" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.723973 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\": container with ID starting with d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215 not found: ID does not exist" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.723998 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} err="failed to get container status \"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\": rpc error: code = NotFound desc = could not find container \"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\": container with ID starting with d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.724014 4631 scope.go:117] "RemoveContainer" containerID="2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.724305 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\": container with ID starting with 2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7 not found: ID does not exist" containerID="2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.724346 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} err="failed to get container status \"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\": rpc error: code = NotFound desc = could not find container \"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\": container with ID starting with 2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.724365 4631 scope.go:117] "RemoveContainer" containerID="d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.724632 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\": container with ID starting with d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b not found: ID does not exist" containerID="d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.724655 4631 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} err="failed to get container status \"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\": rpc error: code = NotFound desc = could not find container \"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\": container with ID starting with d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.724671 4631 scope.go:117] "RemoveContainer" containerID="58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.725040 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\": container with ID starting with 58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b not found: ID does not exist" containerID="58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.725066 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} err="failed to get container status \"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\": rpc error: code = NotFound desc = could not find container \"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\": container with ID starting with 58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.725083 4631 scope.go:117] "RemoveContainer" containerID="ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.725467 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\": container with ID starting with ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132 not found: ID does not exist" containerID="ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.725491 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} err="failed to get container status \"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\": rpc error: code = NotFound desc = could not find container \"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\": container with ID starting with ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.725509 4631 scope.go:117] "RemoveContainer" containerID="7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.725886 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\": container with ID starting with 7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396 not found: ID does not exist" 
containerID="7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.725908 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} err="failed to get container status \"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\": rpc error: code = NotFound desc = could not find container \"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\": container with ID starting with 7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.726318 4631 scope.go:117] "RemoveContainer" containerID="cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54" Nov 29 04:23:14 crc kubenswrapper[4631]: E1129 04:23:14.726726 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\": container with ID starting with cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54 not found: ID does not exist" containerID="cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.726753 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54"} err="failed to get container status \"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\": rpc error: code = NotFound desc = could not find container \"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\": container with ID starting with cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.726769 4631 scope.go:117] "RemoveContainer" containerID="7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.727001 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} err="failed to get container status \"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6\": rpc error: code = NotFound desc = could not find container \"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6\": container with ID starting with 7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.727017 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.727383 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"} err="failed to get container status \"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\": rpc error: code = NotFound desc = could not find container \"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\": container with ID starting with dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.727417 4631 scope.go:117] "RemoveContainer" 
containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.727698 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} err="failed to get container status \"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\": rpc error: code = NotFound desc = could not find container \"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\": container with ID starting with 52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.727721 4631 scope.go:117] "RemoveContainer" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.729194 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} err="failed to get container status \"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\": rpc error: code = NotFound desc = could not find container \"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\": container with ID starting with d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.729214 4631 scope.go:117] "RemoveContainer" containerID="2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.731521 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} err="failed to get container status \"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\": rpc error: code = NotFound desc = could not find container \"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\": container with ID starting with 2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.731551 4631 scope.go:117] "RemoveContainer" containerID="d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.732084 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} err="failed to get container status \"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\": rpc error: code = NotFound desc = could not find container \"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\": container with ID starting with d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.732113 4631 scope.go:117] "RemoveContainer" containerID="58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.732672 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} err="failed to get container status \"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\": rpc error: code = NotFound desc = could not find 
container \"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\": container with ID starting with 58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.732699 4631 scope.go:117] "RemoveContainer" containerID="ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.733064 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} err="failed to get container status \"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\": rpc error: code = NotFound desc = could not find container \"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\": container with ID starting with ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.733089 4631 scope.go:117] "RemoveContainer" containerID="7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.733319 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} err="failed to get container status \"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\": rpc error: code = NotFound desc = could not find container \"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\": container with ID starting with 7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.733361 4631 scope.go:117] "RemoveContainer" containerID="cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.733651 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54"} err="failed to get container status \"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\": rpc error: code = NotFound desc = could not find container \"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\": container with ID starting with cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.733676 4631 scope.go:117] "RemoveContainer" containerID="7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.733859 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} err="failed to get container status \"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6\": rpc error: code = NotFound desc = could not find container \"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6\": container with ID starting with 7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.733884 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.734171 4631 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"} err="failed to get container status \"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\": rpc error: code = NotFound desc = could not find container \"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\": container with ID starting with dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.734199 4631 scope.go:117] "RemoveContainer" containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.734505 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} err="failed to get container status \"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\": rpc error: code = NotFound desc = could not find container \"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\": container with ID starting with 52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.734529 4631 scope.go:117] "RemoveContainer" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.734725 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} err="failed to get container status \"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\": rpc error: code = NotFound desc = could not find container \"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\": container with ID starting with d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.734749 4631 scope.go:117] "RemoveContainer" containerID="2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.734909 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} err="failed to get container status \"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\": rpc error: code = NotFound desc = could not find container \"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\": container with ID starting with 2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.734931 4631 scope.go:117] "RemoveContainer" containerID="d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.735086 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} err="failed to get container status \"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\": rpc error: code = NotFound desc = could not find container \"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\": container with ID starting with 
d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.735107 4631 scope.go:117] "RemoveContainer" containerID="58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.735266 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} err="failed to get container status \"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\": rpc error: code = NotFound desc = could not find container \"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\": container with ID starting with 58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.735286 4631 scope.go:117] "RemoveContainer" containerID="ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.735463 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} err="failed to get container status \"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\": rpc error: code = NotFound desc = could not find container \"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\": container with ID starting with ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.735484 4631 scope.go:117] "RemoveContainer" containerID="7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.735650 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} err="failed to get container status \"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\": rpc error: code = NotFound desc = could not find container \"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\": container with ID starting with 7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.735671 4631 scope.go:117] "RemoveContainer" containerID="cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.736309 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54"} err="failed to get container status \"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\": rpc error: code = NotFound desc = could not find container \"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\": container with ID starting with cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.736359 4631 scope.go:117] "RemoveContainer" containerID="7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.736804 4631 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6"} err="failed to get container status \"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6\": rpc error: code = NotFound desc = could not find container \"7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6\": container with ID starting with 7ef49204e5ae4fb4cd678a747db510b49986da3534532427986157366e03a8d6 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.736824 4631 scope.go:117] "RemoveContainer" containerID="dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.737179 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d"} err="failed to get container status \"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\": rpc error: code = NotFound desc = could not find container \"dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d\": container with ID starting with dab4098eee92d887562378f9f6c20b0ca9aea761207e206b41579236a676521d not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.737209 4631 scope.go:117] "RemoveContainer" containerID="52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.737547 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9"} err="failed to get container status \"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\": rpc error: code = NotFound desc = could not find container \"52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9\": container with ID starting with 52faf1dc7075ce71cef8155d4e122825bcd897ffe58d400fdfe79076bb4218d9 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.737584 4631 scope.go:117] "RemoveContainer" containerID="d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.737870 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215"} err="failed to get container status \"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\": rpc error: code = NotFound desc = could not find container \"d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215\": container with ID starting with d5feefd14c66185daba57a8e590e0da2e40e3c5443bac8b0cb397ccf548f7215 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.737894 4631 scope.go:117] "RemoveContainer" containerID="2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.738430 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7"} err="failed to get container status \"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\": rpc error: code = NotFound desc = could not find container \"2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7\": container with ID starting with 2a43d2e677610afc3c8f3e0d639935400d2ee24e84c6b9247100a9a29b075aa7 not found: ID does not exist" Nov 
29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.738454 4631 scope.go:117] "RemoveContainer" containerID="d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.738680 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b"} err="failed to get container status \"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\": rpc error: code = NotFound desc = could not find container \"d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b\": container with ID starting with d946ea7908d5240dcfcb995b4ca83372e3c2f7c57c995f45ad00ff9db1f1d52b not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.738702 4631 scope.go:117] "RemoveContainer" containerID="58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.738999 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b"} err="failed to get container status \"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\": rpc error: code = NotFound desc = could not find container \"58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b\": container with ID starting with 58cdf33f6c7b1302316a45733ab447ac770ba2c24ecb3fde338dbc6c76fad72b not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.739038 4631 scope.go:117] "RemoveContainer" containerID="ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.739505 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132"} err="failed to get container status \"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\": rpc error: code = NotFound desc = could not find container \"ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132\": container with ID starting with ab8bab0965050efb5d5fd3f2c8a77fd910947caf33cef04a6f53d7392d14d132 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.739529 4631 scope.go:117] "RemoveContainer" containerID="7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.740681 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396"} err="failed to get container status \"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\": rpc error: code = NotFound desc = could not find container \"7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396\": container with ID starting with 7d0539f31e210427c8bb20f39283bcb364f169088ddef67b7b60f63cddc98396 not found: ID does not exist" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.740731 4631 scope.go:117] "RemoveContainer" containerID="cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54" Nov 29 04:23:14 crc kubenswrapper[4631]: I1129 04:23:14.741354 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54"} err="failed to get container status 
\"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\": rpc error: code = NotFound desc = could not find container \"cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54\": container with ID starting with cce6d2630e9b3dcf06de85fd35b82a4c4af51dc6cac904ff6e2364f502e8af54 not found: ID does not exist" Nov 29 04:23:15 crc kubenswrapper[4631]: I1129 04:23:15.225064 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cda25410-78a0-47a1-894f-621a855bd64a" path="/var/lib/kubelet/pods/cda25410-78a0-47a1-894f-621a855bd64a/volumes" Nov 29 04:23:15 crc kubenswrapper[4631]: I1129 04:23:15.471866 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/2.log" Nov 29 04:23:15 crc kubenswrapper[4631]: I1129 04:23:15.477758 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerStarted","Data":"be47af678ab340c46ccb4181544db8f2df4cefbb6dad55955624f59a722de295"} Nov 29 04:23:15 crc kubenswrapper[4631]: I1129 04:23:15.477794 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerStarted","Data":"63bab8aba639dc0d4542fddc16167fe38bc82eca86d5a3145b72d006ded73839"} Nov 29 04:23:15 crc kubenswrapper[4631]: I1129 04:23:15.477806 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerStarted","Data":"1c9c5c39ba4ff18917ca1e65610b028e5a79d40f26ff40430b1503cf8211a237"} Nov 29 04:23:15 crc kubenswrapper[4631]: I1129 04:23:15.477815 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerStarted","Data":"31e0d80d379fe02e332e4e074ace581a111328569498d566e26aec2122c6ed84"} Nov 29 04:23:15 crc kubenswrapper[4631]: I1129 04:23:15.477825 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerStarted","Data":"ba5e857a1c1f69f304de82b0f15d30e4b2fe58016801f44b695ee189b0960dc4"} Nov 29 04:23:15 crc kubenswrapper[4631]: I1129 04:23:15.477832 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerStarted","Data":"3de3c1c02d5a27dc316013e258860fcbb19323aaac1638434dcdcd8a9b08de84"} Nov 29 04:23:17 crc kubenswrapper[4631]: I1129 04:23:17.501156 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerStarted","Data":"d5f86edcd0cf3bac920c31d15893a85ec6c03091a8e266c61364c7be00ca6458"} Nov 29 04:23:20 crc kubenswrapper[4631]: I1129 04:23:20.523757 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" event={"ID":"d174cbd1-20f6-44fb-8af5-0f332569cf59","Type":"ContainerStarted","Data":"17f534ef443f57f6cbc65490e31d5d516117a23775884eac1d82cbfa09bf2c57"} Nov 29 04:23:20 crc kubenswrapper[4631]: I1129 04:23:20.524105 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:20 crc 
kubenswrapper[4631]: I1129 04:23:20.524190 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:20 crc kubenswrapper[4631]: I1129 04:23:20.524360 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:20 crc kubenswrapper[4631]: I1129 04:23:20.555654 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:20 crc kubenswrapper[4631]: I1129 04:23:20.561403 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:20 crc kubenswrapper[4631]: I1129 04:23:20.571769 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" podStartSLOduration=7.571756604 podStartE2EDuration="7.571756604s" podCreationTimestamp="2025-11-29 04:23:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:23:20.562767831 +0000 UTC m=+727.627271355" watchObservedRunningTime="2025-11-29 04:23:20.571756604 +0000 UTC m=+727.636260128" Nov 29 04:23:27 crc kubenswrapper[4631]: I1129 04:23:27.217140 4631 scope.go:117] "RemoveContainer" containerID="8b098d771c091184a554145d83225c1b8122a63fed63b77f3eaf7d286223380b" Nov 29 04:23:28 crc kubenswrapper[4631]: I1129 04:23:28.580722 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-pbk6b_7f871e13-bbe2-4104-8f40-70e695653fef/kube-multus/2.log" Nov 29 04:23:28 crc kubenswrapper[4631]: I1129 04:23:28.581107 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-pbk6b" event={"ID":"7f871e13-bbe2-4104-8f40-70e695653fef","Type":"ContainerStarted","Data":"49167898173d1c4255ef8c5bc2ee7ac642604380764eb4923d9e63968a63ddd1"} Nov 29 04:23:44 crc kubenswrapper[4631]: I1129 04:23:44.232755 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-j5pb9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.475382 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9"] Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.477414 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.479248 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.490625 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9"] Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.662868 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.662995 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txd4m\" (UniqueName: \"kubernetes.io/projected/6d4ed5ca-bb17-4681-9640-03977d0545eb-kube-api-access-txd4m\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.663029 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.763974 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txd4m\" (UniqueName: \"kubernetes.io/projected/6d4ed5ca-bb17-4681-9640-03977d0545eb-kube-api-access-txd4m\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.764055 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.764100 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.764990 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.765076 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:55 crc kubenswrapper[4631]: I1129 04:23:55.798886 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txd4m\" (UniqueName: \"kubernetes.io/projected/6d4ed5ca-bb17-4681-9640-03977d0545eb-kube-api-access-txd4m\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:56 crc kubenswrapper[4631]: I1129 04:23:56.092859 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:23:56 crc kubenswrapper[4631]: I1129 04:23:56.389861 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9"] Nov 29 04:23:56 crc kubenswrapper[4631]: I1129 04:23:56.765318 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" event={"ID":"6d4ed5ca-bb17-4681-9640-03977d0545eb","Type":"ContainerStarted","Data":"38e8a0e8e6d5fc80c998ae2193054bc37b2d455931ee7bb9f768e1e76bc3b00e"} Nov 29 04:23:56 crc kubenswrapper[4631]: I1129 04:23:56.765398 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" event={"ID":"6d4ed5ca-bb17-4681-9640-03977d0545eb","Type":"ContainerStarted","Data":"dcb5f20ba8b66689eb3c4bfc731a62e1aea7eece11893ac2664546f5fa0a6bb8"} Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.637977 4631 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.772717 4631 generic.go:334] "Generic (PLEG): container finished" podID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerID="38e8a0e8e6d5fc80c998ae2193054bc37b2d455931ee7bb9f768e1e76bc3b00e" exitCode=0 Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.772793 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" event={"ID":"6d4ed5ca-bb17-4681-9640-03977d0545eb","Type":"ContainerDied","Data":"38e8a0e8e6d5fc80c998ae2193054bc37b2d455931ee7bb9f768e1e76bc3b00e"} Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.782987 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-74pt5"] Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.784527 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.814896 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-74pt5"] Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.892892 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-catalog-content\") pod \"redhat-operators-74pt5\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.892934 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-utilities\") pod \"redhat-operators-74pt5\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.892958 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8v8b\" (UniqueName: \"kubernetes.io/projected/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-kube-api-access-c8v8b\") pod \"redhat-operators-74pt5\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.993901 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-catalog-content\") pod \"redhat-operators-74pt5\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.993945 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-utilities\") pod \"redhat-operators-74pt5\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.993971 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8v8b\" (UniqueName: \"kubernetes.io/projected/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-kube-api-access-c8v8b\") pod \"redhat-operators-74pt5\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.994633 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-catalog-content\") pod \"redhat-operators-74pt5\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:57 crc kubenswrapper[4631]: I1129 04:23:57.995003 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-utilities\") pod \"redhat-operators-74pt5\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:58 crc kubenswrapper[4631]: I1129 04:23:58.012687 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-c8v8b\" (UniqueName: \"kubernetes.io/projected/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-kube-api-access-c8v8b\") pod \"redhat-operators-74pt5\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:58 crc kubenswrapper[4631]: I1129 04:23:58.101684 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:23:58 crc kubenswrapper[4631]: I1129 04:23:58.293856 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-74pt5"] Nov 29 04:23:58 crc kubenswrapper[4631]: W1129 04:23:58.299675 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f12d4d0_f47a_4def_9cf9_a4fa1bbf5939.slice/crio-50cfe84ba75cef326938eb2eb62aaa252b2bb6043ade3f376800390bae35dda8 WatchSource:0}: Error finding container 50cfe84ba75cef326938eb2eb62aaa252b2bb6043ade3f376800390bae35dda8: Status 404 returned error can't find the container with id 50cfe84ba75cef326938eb2eb62aaa252b2bb6043ade3f376800390bae35dda8 Nov 29 04:23:58 crc kubenswrapper[4631]: I1129 04:23:58.777241 4631 generic.go:334] "Generic (PLEG): container finished" podID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerID="8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb" exitCode=0 Nov 29 04:23:58 crc kubenswrapper[4631]: I1129 04:23:58.777280 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74pt5" event={"ID":"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939","Type":"ContainerDied","Data":"8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb"} Nov 29 04:23:58 crc kubenswrapper[4631]: I1129 04:23:58.777306 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74pt5" event={"ID":"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939","Type":"ContainerStarted","Data":"50cfe84ba75cef326938eb2eb62aaa252b2bb6043ade3f376800390bae35dda8"} Nov 29 04:23:59 crc kubenswrapper[4631]: I1129 04:23:59.786017 4631 generic.go:334] "Generic (PLEG): container finished" podID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerID="4520904469edcde01216066970e135d42174526fb137d87c275f6571dacd8e46" exitCode=0 Nov 29 04:23:59 crc kubenswrapper[4631]: I1129 04:23:59.786084 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" event={"ID":"6d4ed5ca-bb17-4681-9640-03977d0545eb","Type":"ContainerDied","Data":"4520904469edcde01216066970e135d42174526fb137d87c275f6571dacd8e46"} Nov 29 04:23:59 crc kubenswrapper[4631]: I1129 04:23:59.792378 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74pt5" event={"ID":"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939","Type":"ContainerStarted","Data":"4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3"} Nov 29 04:24:00 crc kubenswrapper[4631]: I1129 04:24:00.802827 4631 generic.go:334] "Generic (PLEG): container finished" podID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerID="fbd8199ae66518730b8b6432ded8bf7625dbcabdd54cec01660354dd9405d611" exitCode=0 Nov 29 04:24:00 crc kubenswrapper[4631]: I1129 04:24:00.802940 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" 
event={"ID":"6d4ed5ca-bb17-4681-9640-03977d0545eb","Type":"ContainerDied","Data":"fbd8199ae66518730b8b6432ded8bf7625dbcabdd54cec01660354dd9405d611"} Nov 29 04:24:00 crc kubenswrapper[4631]: I1129 04:24:00.805602 4631 generic.go:334] "Generic (PLEG): container finished" podID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerID="4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3" exitCode=0 Nov 29 04:24:00 crc kubenswrapper[4631]: I1129 04:24:00.805671 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74pt5" event={"ID":"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939","Type":"ContainerDied","Data":"4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3"} Nov 29 04:24:01 crc kubenswrapper[4631]: I1129 04:24:01.817831 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74pt5" event={"ID":"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939","Type":"ContainerStarted","Data":"0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984"} Nov 29 04:24:01 crc kubenswrapper[4631]: I1129 04:24:01.865106 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-74pt5" podStartSLOduration=2.279335826 podStartE2EDuration="4.865070813s" podCreationTimestamp="2025-11-29 04:23:57 +0000 UTC" firstStartedPulling="2025-11-29 04:23:58.779142695 +0000 UTC m=+765.843646209" lastFinishedPulling="2025-11-29 04:24:01.364877672 +0000 UTC m=+768.429381196" observedRunningTime="2025-11-29 04:24:01.851238305 +0000 UTC m=+768.915741859" watchObservedRunningTime="2025-11-29 04:24:01.865070813 +0000 UTC m=+768.929574377" Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.070886 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.249567 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-bundle\") pod \"6d4ed5ca-bb17-4681-9640-03977d0545eb\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.249630 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-util\") pod \"6d4ed5ca-bb17-4681-9640-03977d0545eb\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.249740 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txd4m\" (UniqueName: \"kubernetes.io/projected/6d4ed5ca-bb17-4681-9640-03977d0545eb-kube-api-access-txd4m\") pod \"6d4ed5ca-bb17-4681-9640-03977d0545eb\" (UID: \"6d4ed5ca-bb17-4681-9640-03977d0545eb\") " Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.250829 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-bundle" (OuterVolumeSpecName: "bundle") pod "6d4ed5ca-bb17-4681-9640-03977d0545eb" (UID: "6d4ed5ca-bb17-4681-9640-03977d0545eb"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.259629 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d4ed5ca-bb17-4681-9640-03977d0545eb-kube-api-access-txd4m" (OuterVolumeSpecName: "kube-api-access-txd4m") pod "6d4ed5ca-bb17-4681-9640-03977d0545eb" (UID: "6d4ed5ca-bb17-4681-9640-03977d0545eb"). InnerVolumeSpecName "kube-api-access-txd4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.284588 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-util" (OuterVolumeSpecName: "util") pod "6d4ed5ca-bb17-4681-9640-03977d0545eb" (UID: "6d4ed5ca-bb17-4681-9640-03977d0545eb"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.351703 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txd4m\" (UniqueName: \"kubernetes.io/projected/6d4ed5ca-bb17-4681-9640-03977d0545eb-kube-api-access-txd4m\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.351755 4631 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.351774 4631 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d4ed5ca-bb17-4681-9640-03977d0545eb-util\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.825684 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" event={"ID":"6d4ed5ca-bb17-4681-9640-03977d0545eb","Type":"ContainerDied","Data":"dcb5f20ba8b66689eb3c4bfc731a62e1aea7eece11893ac2664546f5fa0a6bb8"} Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.825744 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dcb5f20ba8b66689eb3c4bfc731a62e1aea7eece11893ac2664546f5fa0a6bb8" Nov 29 04:24:02 crc kubenswrapper[4631]: I1129 04:24:02.825762 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.816042 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf"] Nov 29 04:24:06 crc kubenswrapper[4631]: E1129 04:24:06.816423 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerName="extract" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.816434 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerName="extract" Nov 29 04:24:06 crc kubenswrapper[4631]: E1129 04:24:06.816450 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerName="util" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.816457 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerName="util" Nov 29 04:24:06 crc kubenswrapper[4631]: E1129 04:24:06.816468 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerName="pull" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.816474 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerName="pull" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.816559 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d4ed5ca-bb17-4681-9640-03977d0545eb" containerName="extract" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.816876 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.819531 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.820085 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.820572 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-mrjfm" Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.833092 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf"] Nov 29 04:24:06 crc kubenswrapper[4631]: I1129 04:24:06.909008 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jt4zh\" (UniqueName: \"kubernetes.io/projected/7183e04c-6dbe-4139-8bd2-a217adae2ab6-kube-api-access-jt4zh\") pod \"nmstate-operator-5b5b58f5c8-jrhjf\" (UID: \"7183e04c-6dbe-4139-8bd2-a217adae2ab6\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf" Nov 29 04:24:07 crc kubenswrapper[4631]: I1129 04:24:07.009652 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jt4zh\" (UniqueName: \"kubernetes.io/projected/7183e04c-6dbe-4139-8bd2-a217adae2ab6-kube-api-access-jt4zh\") pod \"nmstate-operator-5b5b58f5c8-jrhjf\" (UID: \"7183e04c-6dbe-4139-8bd2-a217adae2ab6\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf" Nov 29 04:24:07 crc kubenswrapper[4631]: I1129 04:24:07.041425 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jt4zh\" 
(UniqueName: \"kubernetes.io/projected/7183e04c-6dbe-4139-8bd2-a217adae2ab6-kube-api-access-jt4zh\") pod \"nmstate-operator-5b5b58f5c8-jrhjf\" (UID: \"7183e04c-6dbe-4139-8bd2-a217adae2ab6\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf" Nov 29 04:24:07 crc kubenswrapper[4631]: I1129 04:24:07.137167 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf" Nov 29 04:24:07 crc kubenswrapper[4631]: I1129 04:24:07.350310 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf"] Nov 29 04:24:07 crc kubenswrapper[4631]: I1129 04:24:07.854653 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf" event={"ID":"7183e04c-6dbe-4139-8bd2-a217adae2ab6","Type":"ContainerStarted","Data":"d3e785b991ae32f315645b5dc78b55c6ae14e2977cd4a8cee335c81e304512d7"} Nov 29 04:24:08 crc kubenswrapper[4631]: I1129 04:24:08.102639 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:24:08 crc kubenswrapper[4631]: I1129 04:24:08.102706 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:24:08 crc kubenswrapper[4631]: I1129 04:24:08.145852 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:24:08 crc kubenswrapper[4631]: I1129 04:24:08.931968 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:24:10 crc kubenswrapper[4631]: I1129 04:24:10.774597 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-74pt5"] Nov 29 04:24:10 crc kubenswrapper[4631]: I1129 04:24:10.874456 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf" event={"ID":"7183e04c-6dbe-4139-8bd2-a217adae2ab6","Type":"ContainerStarted","Data":"3da18a19b902d479477517dee80d1886fbf300804f839d7735322dbb6c8fdf77"} Nov 29 04:24:10 crc kubenswrapper[4631]: I1129 04:24:10.874587 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-74pt5" podUID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerName="registry-server" containerID="cri-o://0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984" gracePeriod=2 Nov 29 04:24:10 crc kubenswrapper[4631]: I1129 04:24:10.916191 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jrhjf" podStartSLOduration=1.8313021059999999 podStartE2EDuration="4.91617322s" podCreationTimestamp="2025-11-29 04:24:06 +0000 UTC" firstStartedPulling="2025-11-29 04:24:07.372938079 +0000 UTC m=+774.437441593" lastFinishedPulling="2025-11-29 04:24:10.457809153 +0000 UTC m=+777.522312707" observedRunningTime="2025-11-29 04:24:10.912066213 +0000 UTC m=+777.976569727" watchObservedRunningTime="2025-11-29 04:24:10.91617322 +0000 UTC m=+777.980676744" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.239970 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.373000 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-utilities\") pod \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.373041 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-catalog-content\") pod \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.373080 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8v8b\" (UniqueName: \"kubernetes.io/projected/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-kube-api-access-c8v8b\") pod \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\" (UID: \"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939\") " Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.375684 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-utilities" (OuterVolumeSpecName: "utilities") pod "5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" (UID: "5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.385588 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-kube-api-access-c8v8b" (OuterVolumeSpecName: "kube-api-access-c8v8b") pod "5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" (UID: "5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939"). InnerVolumeSpecName "kube-api-access-c8v8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.474272 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.474310 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8v8b\" (UniqueName: \"kubernetes.io/projected/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-kube-api-access-c8v8b\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.504802 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" (UID: "5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.575304 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.885282 4631 generic.go:334] "Generic (PLEG): container finished" podID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerID="0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984" exitCode=0 Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.886253 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-74pt5" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.886489 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74pt5" event={"ID":"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939","Type":"ContainerDied","Data":"0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984"} Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.886567 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-74pt5" event={"ID":"5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939","Type":"ContainerDied","Data":"50cfe84ba75cef326938eb2eb62aaa252b2bb6043ade3f376800390bae35dda8"} Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.886615 4631 scope.go:117] "RemoveContainer" containerID="0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.915212 4631 scope.go:117] "RemoveContainer" containerID="4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.937027 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-74pt5"] Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.945230 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-74pt5"] Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.966575 4631 scope.go:117] "RemoveContainer" containerID="8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.981728 4631 scope.go:117] "RemoveContainer" containerID="0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984" Nov 29 04:24:11 crc kubenswrapper[4631]: E1129 04:24:11.983928 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984\": container with ID starting with 0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984 not found: ID does not exist" containerID="0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.984023 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984"} err="failed to get container status \"0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984\": rpc error: code = NotFound desc = could not find container \"0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984\": container with ID starting with 0642a9ec89b1d22c6ea11d2439c52dca14ae28f6195acdbfbab1b10e4b741984 not found: ID does not exist" Nov 29 04:24:11 crc 
kubenswrapper[4631]: I1129 04:24:11.984099 4631 scope.go:117] "RemoveContainer" containerID="4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3" Nov 29 04:24:11 crc kubenswrapper[4631]: E1129 04:24:11.984647 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3\": container with ID starting with 4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3 not found: ID does not exist" containerID="4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.984697 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3"} err="failed to get container status \"4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3\": rpc error: code = NotFound desc = could not find container \"4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3\": container with ID starting with 4cf40247248ee1d11401bf8fe3cdb076fc3baa1320fdebdf8d18cc2ce85b0df3 not found: ID does not exist" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.984733 4631 scope.go:117] "RemoveContainer" containerID="8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb" Nov 29 04:24:11 crc kubenswrapper[4631]: E1129 04:24:11.985246 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb\": container with ID starting with 8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb not found: ID does not exist" containerID="8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb" Nov 29 04:24:11 crc kubenswrapper[4631]: I1129 04:24:11.985286 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb"} err="failed to get container status \"8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb\": rpc error: code = NotFound desc = could not find container \"8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb\": container with ID starting with 8e46964c77ced8503b92e63bcde01d0ec8b6d9b8202578fee4c0bfa5dddb22bb not found: ID does not exist" Nov 29 04:24:13 crc kubenswrapper[4631]: I1129 04:24:13.224709 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" path="/var/lib/kubelet/pods/5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939/volumes" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.052907 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds"] Nov 29 04:24:16 crc kubenswrapper[4631]: E1129 04:24:16.053797 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerName="extract-content" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.053818 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerName="extract-content" Nov 29 04:24:16 crc kubenswrapper[4631]: E1129 04:24:16.053856 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerName="registry-server" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.053870 4631 
state_mem.go:107] "Deleted CPUSet assignment" podUID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerName="registry-server" Nov 29 04:24:16 crc kubenswrapper[4631]: E1129 04:24:16.053890 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerName="extract-utilities" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.053906 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerName="extract-utilities" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.054092 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f12d4d0-f47a-4def-9cf9-a4fa1bbf5939" containerName="registry-server" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.055024 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.062168 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.062899 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-fg548" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.063109 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.067570 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.124801 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.128744 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.136801 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-dmhbd"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.137573 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.150741 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a232adff-6fb5-4120-8f41-8bf310322024-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-xjsng\" (UID: \"a232adff-6fb5-4120-8f41-8bf310322024\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.150777 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkflt\" (UniqueName: \"kubernetes.io/projected/a232adff-6fb5-4120-8f41-8bf310322024-kube-api-access-zkflt\") pod \"nmstate-webhook-5f6d4c5ccb-xjsng\" (UID: \"a232adff-6fb5-4120-8f41-8bf310322024\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.150796 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-ovs-socket\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.150816 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-nmstate-lock\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.150839 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5jr4\" (UniqueName: \"kubernetes.io/projected/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-kube-api-access-d5jr4\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.150871 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-dbus-socket\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.150902 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6grnp\" (UniqueName: \"kubernetes.io/projected/f1476f66-a509-4f8a-937f-b7b5e906a7e2-kube-api-access-6grnp\") pod \"nmstate-metrics-7f946cbc9-ncfds\" (UID: \"f1476f66-a509-4f8a-937f-b7b5e906a7e2\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.228960 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.229984 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.231974 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.232128 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-68cnw" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.233349 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.238605 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251378 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6grnp\" (UniqueName: \"kubernetes.io/projected/f1476f66-a509-4f8a-937f-b7b5e906a7e2-kube-api-access-6grnp\") pod \"nmstate-metrics-7f946cbc9-ncfds\" (UID: \"f1476f66-a509-4f8a-937f-b7b5e906a7e2\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251438 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a232adff-6fb5-4120-8f41-8bf310322024-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-xjsng\" (UID: \"a232adff-6fb5-4120-8f41-8bf310322024\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251458 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkflt\" (UniqueName: \"kubernetes.io/projected/a232adff-6fb5-4120-8f41-8bf310322024-kube-api-access-zkflt\") pod \"nmstate-webhook-5f6d4c5ccb-xjsng\" (UID: \"a232adff-6fb5-4120-8f41-8bf310322024\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251478 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-ovs-socket\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251494 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-nmstate-lock\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251518 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5jr4\" (UniqueName: \"kubernetes.io/projected/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-kube-api-access-d5jr4\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251551 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-dbus-socket\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 
04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251768 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-dbus-socket\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251805 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-ovs-socket\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.251826 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-nmstate-lock\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.272482 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkflt\" (UniqueName: \"kubernetes.io/projected/a232adff-6fb5-4120-8f41-8bf310322024-kube-api-access-zkflt\") pod \"nmstate-webhook-5f6d4c5ccb-xjsng\" (UID: \"a232adff-6fb5-4120-8f41-8bf310322024\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.272822 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6grnp\" (UniqueName: \"kubernetes.io/projected/f1476f66-a509-4f8a-937f-b7b5e906a7e2-kube-api-access-6grnp\") pod \"nmstate-metrics-7f946cbc9-ncfds\" (UID: \"f1476f66-a509-4f8a-937f-b7b5e906a7e2\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.278883 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5jr4\" (UniqueName: \"kubernetes.io/projected/fbd8e3f0-5167-4724-8f85-8648acdb3f6b-kube-api-access-d5jr4\") pod \"nmstate-handler-dmhbd\" (UID: \"fbd8e3f0-5167-4724-8f85-8648acdb3f6b\") " pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.278964 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a232adff-6fb5-4120-8f41-8bf310322024-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-xjsng\" (UID: \"a232adff-6fb5-4120-8f41-8bf310322024\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.352255 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sszjf\" (UniqueName: \"kubernetes.io/projected/09679895-91f4-4a46-a9cb-03bb2bd32537-kube-api-access-sszjf\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.352313 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/09679895-91f4-4a46-a9cb-03bb2bd32537-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " 
pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.352344 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/09679895-91f4-4a46-a9cb-03bb2bd32537-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.402297 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6448654b5-2hh96"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.403112 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.414065 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6448654b5-2hh96"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.427022 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.432598 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.453091 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sszjf\" (UniqueName: \"kubernetes.io/projected/09679895-91f4-4a46-a9cb-03bb2bd32537-kube-api-access-sszjf\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.453133 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/09679895-91f4-4a46-a9cb-03bb2bd32537-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.453156 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/09679895-91f4-4a46-a9cb-03bb2bd32537-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.453937 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/09679895-91f4-4a46-a9cb-03bb2bd32537-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: E1129 04:24:16.453975 4631 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 29 04:24:16 crc kubenswrapper[4631]: E1129 04:24:16.454493 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09679895-91f4-4a46-a9cb-03bb2bd32537-plugin-serving-cert podName:09679895-91f4-4a46-a9cb-03bb2bd32537 nodeName:}" failed. 
No retries permitted until 2025-11-29 04:24:16.954479023 +0000 UTC m=+784.018982537 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/09679895-91f4-4a46-a9cb-03bb2bd32537-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-tlptj" (UID: "09679895-91f4-4a46-a9cb-03bb2bd32537") : secret "plugin-serving-cert" not found Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.465851 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.478304 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sszjf\" (UniqueName: \"kubernetes.io/projected/09679895-91f4-4a46-a9cb-03bb2bd32537-kube-api-access-sszjf\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.554278 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ef81b43e-ebe5-479f-99aa-db66f264f510-console-serving-cert\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.554315 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-oauth-serving-cert\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.554349 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-service-ca\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.554370 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-console-config\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.554386 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5flt2\" (UniqueName: \"kubernetes.io/projected/ef81b43e-ebe5-479f-99aa-db66f264f510-kube-api-access-5flt2\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.554406 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-trusted-ca-bundle\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.554442 4631 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ef81b43e-ebe5-479f-99aa-db66f264f510-console-oauth-config\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: W1129 04:24:16.648095 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1476f66_a509_4f8a_937f_b7b5e906a7e2.slice/crio-d6a5537e2498577599debd4be292b42906656ea65783012c7d82b4584b1bd862 WatchSource:0}: Error finding container d6a5537e2498577599debd4be292b42906656ea65783012c7d82b4584b1bd862: Status 404 returned error can't find the container with id d6a5537e2498577599debd4be292b42906656ea65783012c7d82b4584b1bd862 Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.648428 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.656383 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ef81b43e-ebe5-479f-99aa-db66f264f510-console-oauth-config\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.656482 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ef81b43e-ebe5-479f-99aa-db66f264f510-console-serving-cert\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.656529 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-oauth-serving-cert\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.656551 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-service-ca\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.656584 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5flt2\" (UniqueName: \"kubernetes.io/projected/ef81b43e-ebe5-479f-99aa-db66f264f510-kube-api-access-5flt2\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.656600 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-console-config\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.656618 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-trusted-ca-bundle\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.657931 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-service-ca\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.658089 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-console-config\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.658269 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-oauth-serving-cert\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.658436 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ef81b43e-ebe5-479f-99aa-db66f264f510-trusted-ca-bundle\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.661367 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ef81b43e-ebe5-479f-99aa-db66f264f510-console-serving-cert\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.661732 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ef81b43e-ebe5-479f-99aa-db66f264f510-console-oauth-config\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.670563 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5flt2\" (UniqueName: \"kubernetes.io/projected/ef81b43e-ebe5-479f-99aa-db66f264f510-kube-api-access-5flt2\") pod \"console-6448654b5-2hh96\" (UID: \"ef81b43e-ebe5-479f-99aa-db66f264f510\") " pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.715516 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.897347 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6448654b5-2hh96"] Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.913741 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng"] Nov 29 04:24:16 crc kubenswrapper[4631]: W1129 04:24:16.917510 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda232adff_6fb5_4120_8f41_8bf310322024.slice/crio-5567169f6fc78d8f180c9553dc64f5db7f2ed917ee01543b9d7f26239b88a567 WatchSource:0}: Error finding container 5567169f6fc78d8f180c9553dc64f5db7f2ed917ee01543b9d7f26239b88a567: Status 404 returned error can't find the container with id 5567169f6fc78d8f180c9553dc64f5db7f2ed917ee01543b9d7f26239b88a567 Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.922653 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6448654b5-2hh96" event={"ID":"ef81b43e-ebe5-479f-99aa-db66f264f510","Type":"ContainerStarted","Data":"6bac247a895f19bcfa803e541c6c25b974df185a4bf29049b1a9dca845d0473a"} Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.923484 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds" event={"ID":"f1476f66-a509-4f8a-937f-b7b5e906a7e2","Type":"ContainerStarted","Data":"d6a5537e2498577599debd4be292b42906656ea65783012c7d82b4584b1bd862"} Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.924546 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-dmhbd" event={"ID":"fbd8e3f0-5167-4724-8f85-8648acdb3f6b","Type":"ContainerStarted","Data":"9b5225c0018c0339dc8aeb74cb445af2436b2fc4c2aed10c13c193229082c4cd"} Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.961927 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/09679895-91f4-4a46-a9cb-03bb2bd32537-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:16 crc kubenswrapper[4631]: I1129 04:24:16.966082 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/09679895-91f4-4a46-a9cb-03bb2bd32537-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-tlptj\" (UID: \"09679895-91f4-4a46-a9cb-03bb2bd32537\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:17 crc kubenswrapper[4631]: I1129 04:24:17.145078 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" Nov 29 04:24:17 crc kubenswrapper[4631]: I1129 04:24:17.417854 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj"] Nov 29 04:24:17 crc kubenswrapper[4631]: I1129 04:24:17.934555 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" event={"ID":"09679895-91f4-4a46-a9cb-03bb2bd32537","Type":"ContainerStarted","Data":"dbb2f9d6a5b0945f7787777e6048ad5d72125aea940540492306bf171da9f714"} Nov 29 04:24:17 crc kubenswrapper[4631]: I1129 04:24:17.936844 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6448654b5-2hh96" event={"ID":"ef81b43e-ebe5-479f-99aa-db66f264f510","Type":"ContainerStarted","Data":"c403061160c2ef1a2aa7d1e05bd38d3a7196779e79908503198458537f1725c9"} Nov 29 04:24:17 crc kubenswrapper[4631]: I1129 04:24:17.942825 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" event={"ID":"a232adff-6fb5-4120-8f41-8bf310322024","Type":"ContainerStarted","Data":"5567169f6fc78d8f180c9553dc64f5db7f2ed917ee01543b9d7f26239b88a567"} Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.716203 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.716869 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.962944 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds" event={"ID":"f1476f66-a509-4f8a-937f-b7b5e906a7e2","Type":"ContainerStarted","Data":"346557b6941dba2a2067989baef89a6da470bcfe97ac2822fc34e319091d3645"} Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.965301 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-dmhbd" event={"ID":"fbd8e3f0-5167-4724-8f85-8648acdb3f6b","Type":"ContainerStarted","Data":"a26d28645ae6ed8c004e7b4d2b65245601e4135dcbec3bf037952568f329ae08"} Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.965545 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.968288 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" event={"ID":"a232adff-6fb5-4120-8f41-8bf310322024","Type":"ContainerStarted","Data":"8cf84e15b1e67b638df1fb6c1ed67e4b16de342769afaf17ce7599989cf63f72"} Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.968474 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.970728 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" 
event={"ID":"09679895-91f4-4a46-a9cb-03bb2bd32537","Type":"ContainerStarted","Data":"01cc1c23d2305e5363bf0cf0ba709ea28769718d67530ccf0c932a46f5a60e27"} Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.983590 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-dmhbd" podStartSLOduration=1.657652989 podStartE2EDuration="4.983570341s" podCreationTimestamp="2025-11-29 04:24:16 +0000 UTC" firstStartedPulling="2025-11-29 04:24:16.490617805 +0000 UTC m=+783.555121319" lastFinishedPulling="2025-11-29 04:24:19.816535117 +0000 UTC m=+786.881038671" observedRunningTime="2025-11-29 04:24:20.980901289 +0000 UTC m=+788.045404843" watchObservedRunningTime="2025-11-29 04:24:20.983570341 +0000 UTC m=+788.048073865" Nov 29 04:24:20 crc kubenswrapper[4631]: I1129 04:24:20.984360 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6448654b5-2hh96" podStartSLOduration=4.984351509 podStartE2EDuration="4.984351509s" podCreationTimestamp="2025-11-29 04:24:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:24:17.968311957 +0000 UTC m=+785.032815511" watchObservedRunningTime="2025-11-29 04:24:20.984351509 +0000 UTC m=+788.048855033" Nov 29 04:24:21 crc kubenswrapper[4631]: I1129 04:24:21.001654 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-tlptj" podStartSLOduration=2.620509523 podStartE2EDuration="5.001629406s" podCreationTimestamp="2025-11-29 04:24:16 +0000 UTC" firstStartedPulling="2025-11-29 04:24:17.434441932 +0000 UTC m=+784.498945446" lastFinishedPulling="2025-11-29 04:24:19.815561755 +0000 UTC m=+786.880065329" observedRunningTime="2025-11-29 04:24:20.994600615 +0000 UTC m=+788.059104159" watchObservedRunningTime="2025-11-29 04:24:21.001629406 +0000 UTC m=+788.066132920" Nov 29 04:24:21 crc kubenswrapper[4631]: I1129 04:24:21.022882 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" podStartSLOduration=2.10834776 podStartE2EDuration="5.022865245s" podCreationTimestamp="2025-11-29 04:24:16 +0000 UTC" firstStartedPulling="2025-11-29 04:24:16.919576866 +0000 UTC m=+783.984080380" lastFinishedPulling="2025-11-29 04:24:19.834094341 +0000 UTC m=+786.898597865" observedRunningTime="2025-11-29 04:24:21.0217677 +0000 UTC m=+788.086271224" watchObservedRunningTime="2025-11-29 04:24:21.022865245 +0000 UTC m=+788.087368759" Nov 29 04:24:22 crc kubenswrapper[4631]: I1129 04:24:22.994014 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds" event={"ID":"f1476f66-a509-4f8a-937f-b7b5e906a7e2","Type":"ContainerStarted","Data":"479d8aff522a3a9227fc662cd9fe1cb27b6227c040fc9b7b49ce22525982af2b"} Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.192377 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-ncfds" podStartSLOduration=3.433929753 podStartE2EDuration="9.192320447s" podCreationTimestamp="2025-11-29 04:24:16 +0000 UTC" firstStartedPulling="2025-11-29 04:24:16.650876103 +0000 UTC m=+783.715379607" lastFinishedPulling="2025-11-29 04:24:22.409266787 +0000 UTC m=+789.473770301" observedRunningTime="2025-11-29 04:24:23.021860204 +0000 UTC m=+790.086363748" watchObservedRunningTime="2025-11-29 04:24:25.192320447 +0000 
UTC m=+792.256823991" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.192958 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nrt2x"] Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.194581 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.213538 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nrt2x"] Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.278510 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-utilities\") pod \"community-operators-nrt2x\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.278708 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wtb4\" (UniqueName: \"kubernetes.io/projected/a120f80b-0dad-4c20-91b1-c510f1998917-kube-api-access-7wtb4\") pod \"community-operators-nrt2x\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.278837 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-catalog-content\") pod \"community-operators-nrt2x\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.380102 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-utilities\") pod \"community-operators-nrt2x\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.380148 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wtb4\" (UniqueName: \"kubernetes.io/projected/a120f80b-0dad-4c20-91b1-c510f1998917-kube-api-access-7wtb4\") pod \"community-operators-nrt2x\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.380188 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-catalog-content\") pod \"community-operators-nrt2x\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.380719 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-catalog-content\") pod \"community-operators-nrt2x\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.380903 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-utilities\") pod \"community-operators-nrt2x\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.403768 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wtb4\" (UniqueName: \"kubernetes.io/projected/a120f80b-0dad-4c20-91b1-c510f1998917-kube-api-access-7wtb4\") pod \"community-operators-nrt2x\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.525500 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:25 crc kubenswrapper[4631]: I1129 04:24:25.813968 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nrt2x"] Nov 29 04:24:26 crc kubenswrapper[4631]: I1129 04:24:26.019237 4631 generic.go:334] "Generic (PLEG): container finished" podID="a120f80b-0dad-4c20-91b1-c510f1998917" containerID="09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4" exitCode=0 Nov 29 04:24:26 crc kubenswrapper[4631]: I1129 04:24:26.019421 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrt2x" event={"ID":"a120f80b-0dad-4c20-91b1-c510f1998917","Type":"ContainerDied","Data":"09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4"} Nov 29 04:24:26 crc kubenswrapper[4631]: I1129 04:24:26.019896 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrt2x" event={"ID":"a120f80b-0dad-4c20-91b1-c510f1998917","Type":"ContainerStarted","Data":"6764cb971a6a2ddfd64cd07c5a25deb290228de1be02b4e7a9110d5784171688"} Nov 29 04:24:26 crc kubenswrapper[4631]: I1129 04:24:26.502527 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-dmhbd" Nov 29 04:24:26 crc kubenswrapper[4631]: I1129 04:24:26.715837 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:26 crc kubenswrapper[4631]: I1129 04:24:26.716182 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:26 crc kubenswrapper[4631]: I1129 04:24:26.723875 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:27 crc kubenswrapper[4631]: I1129 04:24:27.030392 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrt2x" event={"ID":"a120f80b-0dad-4c20-91b1-c510f1998917","Type":"ContainerStarted","Data":"bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f"} Nov 29 04:24:27 crc kubenswrapper[4631]: I1129 04:24:27.037053 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6448654b5-2hh96" Nov 29 04:24:27 crc kubenswrapper[4631]: I1129 04:24:27.114905 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-896b9"] Nov 29 04:24:28 crc kubenswrapper[4631]: I1129 04:24:28.053574 4631 generic.go:334] "Generic (PLEG): container finished" podID="a120f80b-0dad-4c20-91b1-c510f1998917" containerID="bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f" exitCode=0 Nov 
29 04:24:28 crc kubenswrapper[4631]: I1129 04:24:28.053665 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrt2x" event={"ID":"a120f80b-0dad-4c20-91b1-c510f1998917","Type":"ContainerDied","Data":"bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f"} Nov 29 04:24:29 crc kubenswrapper[4631]: I1129 04:24:29.062659 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrt2x" event={"ID":"a120f80b-0dad-4c20-91b1-c510f1998917","Type":"ContainerStarted","Data":"1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153"} Nov 29 04:24:29 crc kubenswrapper[4631]: I1129 04:24:29.090374 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nrt2x" podStartSLOduration=1.63130994 podStartE2EDuration="4.090357294s" podCreationTimestamp="2025-11-29 04:24:25 +0000 UTC" firstStartedPulling="2025-11-29 04:24:26.021061608 +0000 UTC m=+793.085565122" lastFinishedPulling="2025-11-29 04:24:28.480108922 +0000 UTC m=+795.544612476" observedRunningTime="2025-11-29 04:24:29.083877755 +0000 UTC m=+796.148381289" watchObservedRunningTime="2025-11-29 04:24:29.090357294 +0000 UTC m=+796.154860818" Nov 29 04:24:35 crc kubenswrapper[4631]: I1129 04:24:35.526678 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:35 crc kubenswrapper[4631]: I1129 04:24:35.527502 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:35 crc kubenswrapper[4631]: I1129 04:24:35.583739 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:36 crc kubenswrapper[4631]: I1129 04:24:36.192532 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:36 crc kubenswrapper[4631]: I1129 04:24:36.266163 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nrt2x"] Nov 29 04:24:36 crc kubenswrapper[4631]: I1129 04:24:36.441935 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-xjsng" Nov 29 04:24:38 crc kubenswrapper[4631]: I1129 04:24:38.135580 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nrt2x" podUID="a120f80b-0dad-4c20-91b1-c510f1998917" containerName="registry-server" containerID="cri-o://1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153" gracePeriod=2 Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.091509 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.142411 4631 generic.go:334] "Generic (PLEG): container finished" podID="a120f80b-0dad-4c20-91b1-c510f1998917" containerID="1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153" exitCode=0 Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.142460 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrt2x" event={"ID":"a120f80b-0dad-4c20-91b1-c510f1998917","Type":"ContainerDied","Data":"1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153"} Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.142537 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrt2x" event={"ID":"a120f80b-0dad-4c20-91b1-c510f1998917","Type":"ContainerDied","Data":"6764cb971a6a2ddfd64cd07c5a25deb290228de1be02b4e7a9110d5784171688"} Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.142563 4631 scope.go:117] "RemoveContainer" containerID="1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.142473 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrt2x" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.157441 4631 scope.go:117] "RemoveContainer" containerID="bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.173459 4631 scope.go:117] "RemoveContainer" containerID="09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.194882 4631 scope.go:117] "RemoveContainer" containerID="1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153" Nov 29 04:24:39 crc kubenswrapper[4631]: E1129 04:24:39.195322 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153\": container with ID starting with 1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153 not found: ID does not exist" containerID="1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.195376 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153"} err="failed to get container status \"1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153\": rpc error: code = NotFound desc = could not find container \"1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153\": container with ID starting with 1795040b260a5f497137de17b5e395c047911430e101140f611ce2abc2521153 not found: ID does not exist" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.195396 4631 scope.go:117] "RemoveContainer" containerID="bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f" Nov 29 04:24:39 crc kubenswrapper[4631]: E1129 04:24:39.195784 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f\": container with ID starting with bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f not found: ID does not exist" 
containerID="bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.195818 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f"} err="failed to get container status \"bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f\": rpc error: code = NotFound desc = could not find container \"bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f\": container with ID starting with bee8cc3e6e170cf9d7e6a6f74b5794af2a309e265600feeae6a14a4c7470dc3f not found: ID does not exist" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.195843 4631 scope.go:117] "RemoveContainer" containerID="09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4" Nov 29 04:24:39 crc kubenswrapper[4631]: E1129 04:24:39.196491 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4\": container with ID starting with 09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4 not found: ID does not exist" containerID="09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.196509 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4"} err="failed to get container status \"09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4\": rpc error: code = NotFound desc = could not find container \"09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4\": container with ID starting with 09488824efc718715ab0c092c6a0dc91bdd9cb944916ad7bb33738db455254e4 not found: ID does not exist" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.227805 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-utilities\") pod \"a120f80b-0dad-4c20-91b1-c510f1998917\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.227859 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-catalog-content\") pod \"a120f80b-0dad-4c20-91b1-c510f1998917\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.227998 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wtb4\" (UniqueName: \"kubernetes.io/projected/a120f80b-0dad-4c20-91b1-c510f1998917-kube-api-access-7wtb4\") pod \"a120f80b-0dad-4c20-91b1-c510f1998917\" (UID: \"a120f80b-0dad-4c20-91b1-c510f1998917\") " Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.228767 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-utilities" (OuterVolumeSpecName: "utilities") pod "a120f80b-0dad-4c20-91b1-c510f1998917" (UID: "a120f80b-0dad-4c20-91b1-c510f1998917"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.234936 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a120f80b-0dad-4c20-91b1-c510f1998917-kube-api-access-7wtb4" (OuterVolumeSpecName: "kube-api-access-7wtb4") pod "a120f80b-0dad-4c20-91b1-c510f1998917" (UID: "a120f80b-0dad-4c20-91b1-c510f1998917"). InnerVolumeSpecName "kube-api-access-7wtb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.279869 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a120f80b-0dad-4c20-91b1-c510f1998917" (UID: "a120f80b-0dad-4c20-91b1-c510f1998917"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.329134 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wtb4\" (UniqueName: \"kubernetes.io/projected/a120f80b-0dad-4c20-91b1-c510f1998917-kube-api-access-7wtb4\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.329162 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.329171 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a120f80b-0dad-4c20-91b1-c510f1998917-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.495125 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nrt2x"] Nov 29 04:24:39 crc kubenswrapper[4631]: I1129 04:24:39.501153 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nrt2x"] Nov 29 04:24:41 crc kubenswrapper[4631]: I1129 04:24:41.225195 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a120f80b-0dad-4c20-91b1-c510f1998917" path="/var/lib/kubelet/pods/a120f80b-0dad-4c20-91b1-c510f1998917/volumes" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.254798 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh"] Nov 29 04:24:50 crc kubenswrapper[4631]: E1129 04:24:50.255749 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a120f80b-0dad-4c20-91b1-c510f1998917" containerName="registry-server" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.255768 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="a120f80b-0dad-4c20-91b1-c510f1998917" containerName="registry-server" Nov 29 04:24:50 crc kubenswrapper[4631]: E1129 04:24:50.255799 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a120f80b-0dad-4c20-91b1-c510f1998917" containerName="extract-content" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.255806 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="a120f80b-0dad-4c20-91b1-c510f1998917" containerName="extract-content" Nov 29 04:24:50 crc kubenswrapper[4631]: E1129 04:24:50.255816 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a120f80b-0dad-4c20-91b1-c510f1998917" 
containerName="extract-utilities" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.255822 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="a120f80b-0dad-4c20-91b1-c510f1998917" containerName="extract-utilities" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.256031 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="a120f80b-0dad-4c20-91b1-c510f1998917" containerName="registry-server" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.259064 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.261311 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.316578 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh"] Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.397289 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.397551 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx25v\" (UniqueName: \"kubernetes.io/projected/42828980-6a57-467a-85e4-690f84a7f368-kube-api-access-rx25v\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.397670 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.499402 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx25v\" (UniqueName: \"kubernetes.io/projected/42828980-6a57-467a-85e4-690f84a7f368-kube-api-access-rx25v\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.499792 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.499894 4631 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.500637 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.501549 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.521778 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx25v\" (UniqueName: \"kubernetes.io/projected/42828980-6a57-467a-85e4-690f84a7f368-kube-api-access-rx25v\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.725155 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.727650 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:24:50 crc kubenswrapper[4631]: I1129 04:24:50.727707 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:24:51 crc kubenswrapper[4631]: I1129 04:24:51.003192 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh"] Nov 29 04:24:51 crc kubenswrapper[4631]: I1129 04:24:51.226518 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" event={"ID":"42828980-6a57-467a-85e4-690f84a7f368","Type":"ContainerStarted","Data":"4dedfb1144ce7f0b6410d30854fd7ce1d84e4f13c63497b6ac4fa617d8eff608"} Nov 29 04:24:51 crc kubenswrapper[4631]: I1129 04:24:51.226874 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" event={"ID":"42828980-6a57-467a-85e4-690f84a7f368","Type":"ContainerStarted","Data":"af669d0314785b1e7da094453ac40c680e93b8119e5cf55a0b6be2f1afbb43c3"} Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.163467 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-896b9" podUID="002a7abb-c9ed-4ae8-92da-b4985ff0643c" containerName="console" containerID="cri-o://2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f" gracePeriod=15 Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.237092 4631 generic.go:334] "Generic (PLEG): container finished" podID="42828980-6a57-467a-85e4-690f84a7f368" containerID="4dedfb1144ce7f0b6410d30854fd7ce1d84e4f13c63497b6ac4fa617d8eff608" exitCode=0 Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.237172 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" event={"ID":"42828980-6a57-467a-85e4-690f84a7f368","Type":"ContainerDied","Data":"4dedfb1144ce7f0b6410d30854fd7ce1d84e4f13c63497b6ac4fa617d8eff608"} Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.592466 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-896b9_002a7abb-c9ed-4ae8-92da-b4985ff0643c/console/0.log" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.592536 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.728024 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-trusted-ca-bundle\") pod \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.728117 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-service-ca\") pod \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.728283 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-serving-cert\") pod \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.728954 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-oauth-serving-cert\") pod \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.729019 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hlnn\" (UniqueName: \"kubernetes.io/projected/002a7abb-c9ed-4ae8-92da-b4985ff0643c-kube-api-access-8hlnn\") pod \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.729141 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-oauth-config\") pod \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.729243 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-config\") pod \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\" (UID: \"002a7abb-c9ed-4ae8-92da-b4985ff0643c\") " Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.729770 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "002a7abb-c9ed-4ae8-92da-b4985ff0643c" (UID: "002a7abb-c9ed-4ae8-92da-b4985ff0643c"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.729906 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-service-ca" (OuterVolumeSpecName: "service-ca") pod "002a7abb-c9ed-4ae8-92da-b4985ff0643c" (UID: "002a7abb-c9ed-4ae8-92da-b4985ff0643c"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.730147 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "002a7abb-c9ed-4ae8-92da-b4985ff0643c" (UID: "002a7abb-c9ed-4ae8-92da-b4985ff0643c"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.730167 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-config" (OuterVolumeSpecName: "console-config") pod "002a7abb-c9ed-4ae8-92da-b4985ff0643c" (UID: "002a7abb-c9ed-4ae8-92da-b4985ff0643c"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.735574 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "002a7abb-c9ed-4ae8-92da-b4985ff0643c" (UID: "002a7abb-c9ed-4ae8-92da-b4985ff0643c"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.736446 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/002a7abb-c9ed-4ae8-92da-b4985ff0643c-kube-api-access-8hlnn" (OuterVolumeSpecName: "kube-api-access-8hlnn") pod "002a7abb-c9ed-4ae8-92da-b4985ff0643c" (UID: "002a7abb-c9ed-4ae8-92da-b4985ff0643c"). InnerVolumeSpecName "kube-api-access-8hlnn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.739388 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "002a7abb-c9ed-4ae8-92da-b4985ff0643c" (UID: "002a7abb-c9ed-4ae8-92da-b4985ff0643c"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.831299 4631 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.831390 4631 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.831421 4631 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-service-ca\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.831447 4631 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.831467 4631 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/002a7abb-c9ed-4ae8-92da-b4985ff0643c-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.831486 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hlnn\" (UniqueName: \"kubernetes.io/projected/002a7abb-c9ed-4ae8-92da-b4985ff0643c-kube-api-access-8hlnn\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:52 crc kubenswrapper[4631]: I1129 04:24:52.831506 4631 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/002a7abb-c9ed-4ae8-92da-b4985ff0643c-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.251100 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-896b9_002a7abb-c9ed-4ae8-92da-b4985ff0643c/console/0.log" Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.251182 4631 generic.go:334] "Generic (PLEG): container finished" podID="002a7abb-c9ed-4ae8-92da-b4985ff0643c" containerID="2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f" exitCode=2 Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.251225 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-896b9" event={"ID":"002a7abb-c9ed-4ae8-92da-b4985ff0643c","Type":"ContainerDied","Data":"2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f"} Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.251266 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-896b9" event={"ID":"002a7abb-c9ed-4ae8-92da-b4985ff0643c","Type":"ContainerDied","Data":"c5f1eee8e998cf194d47953fb607a26423551858ac5870ba2efe7138fc2e75c7"} Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.251235 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-896b9" Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.251295 4631 scope.go:117] "RemoveContainer" containerID="2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f" Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.293488 4631 scope.go:117] "RemoveContainer" containerID="2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f" Nov 29 04:24:53 crc kubenswrapper[4631]: E1129 04:24:53.294296 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f\": container with ID starting with 2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f not found: ID does not exist" containerID="2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f" Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.294352 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f"} err="failed to get container status \"2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f\": rpc error: code = NotFound desc = could not find container \"2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f\": container with ID starting with 2508bc06cd6446e6dc5345ea092ac41410a3ea3687fb2270a866c8e5d250cd1f not found: ID does not exist" Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.295413 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-896b9"] Nov 29 04:24:53 crc kubenswrapper[4631]: I1129 04:24:53.301575 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-896b9"] Nov 29 04:24:54 crc kubenswrapper[4631]: I1129 04:24:54.262165 4631 generic.go:334] "Generic (PLEG): container finished" podID="42828980-6a57-467a-85e4-690f84a7f368" containerID="01cc73d646094189d036a15367cf25541a5fb77e53f3f6eb26c2381e6289707a" exitCode=0 Nov 29 04:24:54 crc kubenswrapper[4631]: I1129 04:24:54.262282 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" event={"ID":"42828980-6a57-467a-85e4-690f84a7f368","Type":"ContainerDied","Data":"01cc73d646094189d036a15367cf25541a5fb77e53f3f6eb26c2381e6289707a"} Nov 29 04:24:55 crc kubenswrapper[4631]: I1129 04:24:55.228150 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="002a7abb-c9ed-4ae8-92da-b4985ff0643c" path="/var/lib/kubelet/pods/002a7abb-c9ed-4ae8-92da-b4985ff0643c/volumes" Nov 29 04:24:55 crc kubenswrapper[4631]: I1129 04:24:55.279881 4631 generic.go:334] "Generic (PLEG): container finished" podID="42828980-6a57-467a-85e4-690f84a7f368" containerID="1a741c57a0b7bcdacc84772dc6edd92a4db561113494e2a8b02b466ae2cb485b" exitCode=0 Nov 29 04:24:55 crc kubenswrapper[4631]: I1129 04:24:55.279926 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" event={"ID":"42828980-6a57-467a-85e4-690f84a7f368","Type":"ContainerDied","Data":"1a741c57a0b7bcdacc84772dc6edd92a4db561113494e2a8b02b466ae2cb485b"} Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.610889 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.687380 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-util\") pod \"42828980-6a57-467a-85e4-690f84a7f368\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.687490 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-bundle\") pod \"42828980-6a57-467a-85e4-690f84a7f368\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.687574 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rx25v\" (UniqueName: \"kubernetes.io/projected/42828980-6a57-467a-85e4-690f84a7f368-kube-api-access-rx25v\") pod \"42828980-6a57-467a-85e4-690f84a7f368\" (UID: \"42828980-6a57-467a-85e4-690f84a7f368\") " Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.688582 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-bundle" (OuterVolumeSpecName: "bundle") pod "42828980-6a57-467a-85e4-690f84a7f368" (UID: "42828980-6a57-467a-85e4-690f84a7f368"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.692699 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42828980-6a57-467a-85e4-690f84a7f368-kube-api-access-rx25v" (OuterVolumeSpecName: "kube-api-access-rx25v") pod "42828980-6a57-467a-85e4-690f84a7f368" (UID: "42828980-6a57-467a-85e4-690f84a7f368"). InnerVolumeSpecName "kube-api-access-rx25v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.712175 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-util" (OuterVolumeSpecName: "util") pod "42828980-6a57-467a-85e4-690f84a7f368" (UID: "42828980-6a57-467a-85e4-690f84a7f368"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.789651 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rx25v\" (UniqueName: \"kubernetes.io/projected/42828980-6a57-467a-85e4-690f84a7f368-kube-api-access-rx25v\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.789696 4631 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-util\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:56 crc kubenswrapper[4631]: I1129 04:24:56.789715 4631 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/42828980-6a57-467a-85e4-690f84a7f368-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:24:57 crc kubenswrapper[4631]: I1129 04:24:57.298247 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" event={"ID":"42828980-6a57-467a-85e4-690f84a7f368","Type":"ContainerDied","Data":"af669d0314785b1e7da094453ac40c680e93b8119e5cf55a0b6be2f1afbb43c3"} Nov 29 04:24:57 crc kubenswrapper[4631]: I1129 04:24:57.298309 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af669d0314785b1e7da094453ac40c680e93b8119e5cf55a0b6be2f1afbb43c3" Nov 29 04:24:57 crc kubenswrapper[4631]: I1129 04:24:57.298482 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh" Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.606265 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"] Nov 29 04:25:07 crc kubenswrapper[4631]: E1129 04:25:07.606886 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42828980-6a57-467a-85e4-690f84a7f368" containerName="pull" Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.606897 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="42828980-6a57-467a-85e4-690f84a7f368" containerName="pull" Nov 29 04:25:07 crc kubenswrapper[4631]: E1129 04:25:07.606909 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42828980-6a57-467a-85e4-690f84a7f368" containerName="util" Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.606915 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="42828980-6a57-467a-85e4-690f84a7f368" containerName="util" Nov 29 04:25:07 crc kubenswrapper[4631]: E1129 04:25:07.606924 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42828980-6a57-467a-85e4-690f84a7f368" containerName="extract" Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.606930 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="42828980-6a57-467a-85e4-690f84a7f368" containerName="extract" Nov 29 04:25:07 crc kubenswrapper[4631]: E1129 04:25:07.606941 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="002a7abb-c9ed-4ae8-92da-b4985ff0643c" containerName="console" Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.606946 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="002a7abb-c9ed-4ae8-92da-b4985ff0643c" containerName="console" Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.607029 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="002a7abb-c9ed-4ae8-92da-b4985ff0643c" containerName="console" Nov 
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.607377 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.610214 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.610492 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.610625 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.616028 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-hhnsh"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.620443 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.674853 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"]
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.747577 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lg895\" (UniqueName: \"kubernetes.io/projected/b9fde2d9-7731-458b-9e00-216a755d629c-kube-api-access-lg895\") pod \"metallb-operator-controller-manager-86d54cd4bb-mfrnm\" (UID: \"b9fde2d9-7731-458b-9e00-216a755d629c\") " pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.747617 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b9fde2d9-7731-458b-9e00-216a755d629c-apiservice-cert\") pod \"metallb-operator-controller-manager-86d54cd4bb-mfrnm\" (UID: \"b9fde2d9-7731-458b-9e00-216a755d629c\") " pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.747645 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b9fde2d9-7731-458b-9e00-216a755d629c-webhook-cert\") pod \"metallb-operator-controller-manager-86d54cd4bb-mfrnm\" (UID: \"b9fde2d9-7731-458b-9e00-216a755d629c\") " pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.835174 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"]
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.835790 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.839197 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.839868 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.840254 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-jdp2h"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.848505 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lg895\" (UniqueName: \"kubernetes.io/projected/b9fde2d9-7731-458b-9e00-216a755d629c-kube-api-access-lg895\") pod \"metallb-operator-controller-manager-86d54cd4bb-mfrnm\" (UID: \"b9fde2d9-7731-458b-9e00-216a755d629c\") " pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.848547 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b9fde2d9-7731-458b-9e00-216a755d629c-apiservice-cert\") pod \"metallb-operator-controller-manager-86d54cd4bb-mfrnm\" (UID: \"b9fde2d9-7731-458b-9e00-216a755d629c\") " pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.848573 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b9fde2d9-7731-458b-9e00-216a755d629c-webhook-cert\") pod \"metallb-operator-controller-manager-86d54cd4bb-mfrnm\" (UID: \"b9fde2d9-7731-458b-9e00-216a755d629c\") " pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.854130 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b9fde2d9-7731-458b-9e00-216a755d629c-webhook-cert\") pod \"metallb-operator-controller-manager-86d54cd4bb-mfrnm\" (UID: \"b9fde2d9-7731-458b-9e00-216a755d629c\") " pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.854664 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b9fde2d9-7731-458b-9e00-216a755d629c-apiservice-cert\") pod \"metallb-operator-controller-manager-86d54cd4bb-mfrnm\" (UID: \"b9fde2d9-7731-458b-9e00-216a755d629c\") " pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.863082 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"]
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.886973 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lg895\" (UniqueName: \"kubernetes.io/projected/b9fde2d9-7731-458b-9e00-216a755d629c-kube-api-access-lg895\") pod \"metallb-operator-controller-manager-86d54cd4bb-mfrnm\" (UID: \"b9fde2d9-7731-458b-9e00-216a755d629c\") " pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.920784 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.949632 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab-webhook-cert\") pod \"metallb-operator-webhook-server-8598fb747-q99bx\" (UID: \"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab\") " pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.949849 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stpvq\" (UniqueName: \"kubernetes.io/projected/e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab-kube-api-access-stpvq\") pod \"metallb-operator-webhook-server-8598fb747-q99bx\" (UID: \"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab\") " pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:07 crc kubenswrapper[4631]: I1129 04:25:07.950005 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab-apiservice-cert\") pod \"metallb-operator-webhook-server-8598fb747-q99bx\" (UID: \"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab\") " pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.050998 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab-webhook-cert\") pod \"metallb-operator-webhook-server-8598fb747-q99bx\" (UID: \"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab\") " pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.051060 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stpvq\" (UniqueName: \"kubernetes.io/projected/e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab-kube-api-access-stpvq\") pod \"metallb-operator-webhook-server-8598fb747-q99bx\" (UID: \"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab\") " pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.051142 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab-apiservice-cert\") pod \"metallb-operator-webhook-server-8598fb747-q99bx\" (UID: \"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab\") " pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.054381 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab-apiservice-cert\") pod \"metallb-operator-webhook-server-8598fb747-q99bx\" (UID: \"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab\") " pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.058874 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab-webhook-cert\") pod \"metallb-operator-webhook-server-8598fb747-q99bx\" (UID: \"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab\") " pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.076257 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stpvq\" (UniqueName: \"kubernetes.io/projected/e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab-kube-api-access-stpvq\") pod \"metallb-operator-webhook-server-8598fb747-q99bx\" (UID: \"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab\") " pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.147436 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.334108 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"]
Nov 29 04:25:08 crc kubenswrapper[4631]: W1129 04:25:08.348590 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode1c922f9_f18e_4d4c_a1ff_b5f7a8bde9ab.slice/crio-0f6a5cfe03b92862749051c94319345388edea92faa69a04875d328a981f556c WatchSource:0}: Error finding container 0f6a5cfe03b92862749051c94319345388edea92faa69a04875d328a981f556c: Status 404 returned error can't find the container with id 0f6a5cfe03b92862749051c94319345388edea92faa69a04875d328a981f556c
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.355306 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx" event={"ID":"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab","Type":"ContainerStarted","Data":"0f6a5cfe03b92862749051c94319345388edea92faa69a04875d328a981f556c"}
Nov 29 04:25:08 crc kubenswrapper[4631]: I1129 04:25:08.358571 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"]
Nov 29 04:25:09 crc kubenswrapper[4631]: I1129 04:25:09.360536 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm" event={"ID":"b9fde2d9-7731-458b-9e00-216a755d629c","Type":"ContainerStarted","Data":"cfe9e77ea363ce63eadbf6d2f63fff0de0aae49df16827771d293e2d5a74399c"}
Nov 29 04:25:13 crc kubenswrapper[4631]: I1129 04:25:13.389682 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx" event={"ID":"e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab","Type":"ContainerStarted","Data":"11730c0c970fd505d72072e46d229bbc1f9c185582747d5ef416d78dbfec7844"}
Nov 29 04:25:13 crc kubenswrapper[4631]: I1129 04:25:13.390246 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:13 crc kubenswrapper[4631]: I1129 04:25:13.391105 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm" event={"ID":"b9fde2d9-7731-458b-9e00-216a755d629c","Type":"ContainerStarted","Data":"adc24cda0bfba92082cb463549630f6718f2220e2b72ad52e94c6e959c1bc018"}
Nov 29 04:25:13 crc kubenswrapper[4631]: I1129 04:25:13.391222 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:13 crc kubenswrapper[4631]: I1129 04:25:13.408067 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx" podStartSLOduration=1.559631582 podStartE2EDuration="6.408052998s" podCreationTimestamp="2025-11-29 04:25:07 +0000 UTC" firstStartedPulling="2025-11-29 04:25:08.351066313 +0000 UTC m=+835.415569827" lastFinishedPulling="2025-11-29 04:25:13.199487729 +0000 UTC m=+840.263991243" observedRunningTime="2025-11-29 04:25:13.404393244 +0000 UTC m=+840.468896758" watchObservedRunningTime="2025-11-29 04:25:13.408052998 +0000 UTC m=+840.472556512"
Nov 29 04:25:13 crc kubenswrapper[4631]: I1129 04:25:13.427240 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm" podStartSLOduration=3.397733628 podStartE2EDuration="6.427224799s" podCreationTimestamp="2025-11-29 04:25:07 +0000 UTC" firstStartedPulling="2025-11-29 04:25:08.365655798 +0000 UTC m=+835.430159312" lastFinishedPulling="2025-11-29 04:25:11.395146969 +0000 UTC m=+838.459650483" observedRunningTime="2025-11-29 04:25:13.424911906 +0000 UTC m=+840.489415430" watchObservedRunningTime="2025-11-29 04:25:13.427224799 +0000 UTC m=+840.491728313"
Nov 29 04:25:20 crc kubenswrapper[4631]: I1129 04:25:20.716233 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 04:25:20 crc kubenswrapper[4631]: I1129 04:25:20.716723 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 04:25:20 crc kubenswrapper[4631]: I1129 04:25:20.716773 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd"
Nov 29 04:25:20 crc kubenswrapper[4631]: I1129 04:25:20.717316 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d24bc233b5493c7d82c41dde646e52c2ccbd2abaf110835404b67654167e1ec2"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 04:25:20 crc kubenswrapper[4631]: I1129 04:25:20.717373 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://d24bc233b5493c7d82c41dde646e52c2ccbd2abaf110835404b67654167e1ec2" gracePeriod=600
Nov 29 04:25:21 crc kubenswrapper[4631]: I1129 04:25:21.434988 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="d24bc233b5493c7d82c41dde646e52c2ccbd2abaf110835404b67654167e1ec2" exitCode=0
Nov 29 04:25:21 crc kubenswrapper[4631]: I1129 04:25:21.435063 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"d24bc233b5493c7d82c41dde646e52c2ccbd2abaf110835404b67654167e1ec2"}
Nov 29 04:25:21 crc kubenswrapper[4631]: I1129 04:25:21.435397 4631 scope.go:117] "RemoveContainer" containerID="f92a4ea4c8fc751f84f3dd0318393a90ca37c524f672eccb6931f3b74e9f254a"
Nov 29 04:25:22 crc kubenswrapper[4631]: I1129 04:25:22.443792 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"0d61c7b70ecd9c7737b4b7e588d56ad7e8044dda6cfe17bbe23a704a996d9bc8"}
Nov 29 04:25:28 crc kubenswrapper[4631]: I1129 04:25:28.153387 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-8598fb747-q99bx"
Nov 29 04:25:47 crc kubenswrapper[4631]: I1129 04:25:47.926651 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-86d54cd4bb-mfrnm"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.819479 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-zpqkq"]
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.821712 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.824238 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.824274 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.824489 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-pwh2c"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.826277 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z"]
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.826993 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.827881 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.838145 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-reloader\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.838178 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.838201 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkrg2\" (UniqueName: \"kubernetes.io/projected/67c6fb55-67d7-4d86-8071-1a5e2d13e338-kube-api-access-xkrg2\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.838233 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/67c6fb55-67d7-4d86-8071-1a5e2d13e338-frr-startup\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.838249 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d0c7592-e989-41cd-b1bb-4ec52e694973-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-ktl8z\" (UID: \"7d0c7592-e989-41cd-b1bb-4ec52e694973\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.838271 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics-certs\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.838326 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5jb8\" (UniqueName: \"kubernetes.io/projected/7d0c7592-e989-41cd-b1bb-4ec52e694973-kube-api-access-c5jb8\") pod \"frr-k8s-webhook-server-7fcb986d4-ktl8z\" (UID: \"7d0c7592-e989-41cd-b1bb-4ec52e694973\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.838351 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-frr-conf\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.838378 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-frr-sockets\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.845939 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z"]
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.914733 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-rjtb7"]
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.915587 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-rjtb7"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.918830 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.918854 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-hf74c"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.918982 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.919020 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.935792 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-nbzls"]
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.936561 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-nbzls"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939546 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5jb8\" (UniqueName: \"kubernetes.io/projected/7d0c7592-e989-41cd-b1bb-4ec52e694973-kube-api-access-c5jb8\") pod \"frr-k8s-webhook-server-7fcb986d4-ktl8z\" (UID: \"7d0c7592-e989-41cd-b1bb-4ec52e694973\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939574 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-frr-conf\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939595 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-memberlist\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939624 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-frr-sockets\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939656 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-reloader\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939675 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939694 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkrg2\" (UniqueName: \"kubernetes.io/projected/67c6fb55-67d7-4d86-8071-1a5e2d13e338-kube-api-access-xkrg2\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939716 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-metallb-excludel2\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939738 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/67c6fb55-67d7-4d86-8071-1a5e2d13e338-frr-startup\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939754 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d0c7592-e989-41cd-b1bb-4ec52e694973-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-ktl8z\" (UID: \"7d0c7592-e989-41cd-b1bb-4ec52e694973\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939773 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics-certs\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939788 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-metrics-certs\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.939808 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jszpn\" (UniqueName: \"kubernetes.io/projected/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-kube-api-access-jszpn\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.940249 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: E1129 04:25:48.940283 4631 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found
Nov 29 04:25:48 crc kubenswrapper[4631]: E1129 04:25:48.940321 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d0c7592-e989-41cd-b1bb-4ec52e694973-cert podName:7d0c7592-e989-41cd-b1bb-4ec52e694973 nodeName:}" failed. No retries permitted until 2025-11-29 04:25:49.440308961 +0000 UTC m=+876.504812475 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7d0c7592-e989-41cd-b1bb-4ec52e694973-cert") pod "frr-k8s-webhook-server-7fcb986d4-ktl8z" (UID: "7d0c7592-e989-41cd-b1bb-4ec52e694973") : secret "frr-k8s-webhook-server-cert" not found
Nov 29 04:25:48 crc kubenswrapper[4631]: E1129 04:25:48.940570 4631 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found
Nov 29 04:25:48 crc kubenswrapper[4631]: E1129 04:25:48.940594 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics-certs podName:67c6fb55-67d7-4d86-8071-1a5e2d13e338 nodeName:}" failed. No retries permitted until 2025-11-29 04:25:49.440586738 +0000 UTC m=+876.505090242 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics-certs") pod "frr-k8s-zpqkq" (UID: "67c6fb55-67d7-4d86-8071-1a5e2d13e338") : secret "frr-k8s-certs-secret" not found
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.940685 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.940846 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-frr-sockets\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.940878 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-reloader\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.941219 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/67c6fb55-67d7-4d86-8071-1a5e2d13e338-frr-conf\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.941608 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/67c6fb55-67d7-4d86-8071-1a5e2d13e338-frr-startup\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.958429 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-nbzls"]
Nov 29 04:25:48 crc kubenswrapper[4631]: I1129 04:25:48.982893 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkrg2\" (UniqueName: \"kubernetes.io/projected/67c6fb55-67d7-4d86-8071-1a5e2d13e338-kube-api-access-xkrg2\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
(UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.000039 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5jb8\" (UniqueName: \"kubernetes.io/projected/7d0c7592-e989-41cd-b1bb-4ec52e694973-kube-api-access-c5jb8\") pod \"frr-k8s-webhook-server-7fcb986d4-ktl8z\" (UID: \"7d0c7592-e989-41cd-b1bb-4ec52e694973\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.041430 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-metallb-excludel2\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.041509 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-metrics-certs\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.041535 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2msr\" (UniqueName: \"kubernetes.io/projected/40e9d6d5-fd7e-4962-aace-e0fe711eb77d-kube-api-access-q2msr\") pod \"controller-f8648f98b-nbzls\" (UID: \"40e9d6d5-fd7e-4962-aace-e0fe711eb77d\") " pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.041555 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/40e9d6d5-fd7e-4962-aace-e0fe711eb77d-metrics-certs\") pod \"controller-f8648f98b-nbzls\" (UID: \"40e9d6d5-fd7e-4962-aace-e0fe711eb77d\") " pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.041574 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jszpn\" (UniqueName: \"kubernetes.io/projected/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-kube-api-access-jszpn\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.041616 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-memberlist\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.041632 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40e9d6d5-fd7e-4962-aace-e0fe711eb77d-cert\") pod \"controller-f8648f98b-nbzls\" (UID: \"40e9d6d5-fd7e-4962-aace-e0fe711eb77d\") " pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: E1129 04:25:49.041769 4631 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 29 04:25:49 crc kubenswrapper[4631]: E1129 04:25:49.041811 4631 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-metrics-certs podName:b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d nodeName:}" failed. No retries permitted until 2025-11-29 04:25:49.541796977 +0000 UTC m=+876.606300491 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-metrics-certs") pod "speaker-rjtb7" (UID: "b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d") : secret "speaker-certs-secret" not found Nov 29 04:25:49 crc kubenswrapper[4631]: E1129 04:25:49.041979 4631 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 29 04:25:49 crc kubenswrapper[4631]: E1129 04:25:49.042047 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-memberlist podName:b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d nodeName:}" failed. No retries permitted until 2025-11-29 04:25:49.542029722 +0000 UTC m=+876.606533236 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-memberlist") pod "speaker-rjtb7" (UID: "b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d") : secret "metallb-memberlist" not found Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.042153 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-metallb-excludel2\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.058187 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jszpn\" (UniqueName: \"kubernetes.io/projected/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-kube-api-access-jszpn\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.142529 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2msr\" (UniqueName: \"kubernetes.io/projected/40e9d6d5-fd7e-4962-aace-e0fe711eb77d-kube-api-access-q2msr\") pod \"controller-f8648f98b-nbzls\" (UID: \"40e9d6d5-fd7e-4962-aace-e0fe711eb77d\") " pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.142567 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/40e9d6d5-fd7e-4962-aace-e0fe711eb77d-metrics-certs\") pod \"controller-f8648f98b-nbzls\" (UID: \"40e9d6d5-fd7e-4962-aace-e0fe711eb77d\") " pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.142609 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40e9d6d5-fd7e-4962-aace-e0fe711eb77d-cert\") pod \"controller-f8648f98b-nbzls\" (UID: \"40e9d6d5-fd7e-4962-aace-e0fe711eb77d\") " pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.145631 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/40e9d6d5-fd7e-4962-aace-e0fe711eb77d-metrics-certs\") pod \"controller-f8648f98b-nbzls\" (UID: \"40e9d6d5-fd7e-4962-aace-e0fe711eb77d\") " 
pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.146186 4631 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.158753 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40e9d6d5-fd7e-4962-aace-e0fe711eb77d-cert\") pod \"controller-f8648f98b-nbzls\" (UID: \"40e9d6d5-fd7e-4962-aace-e0fe711eb77d\") " pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.160453 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2msr\" (UniqueName: \"kubernetes.io/projected/40e9d6d5-fd7e-4962-aace-e0fe711eb77d-kube-api-access-q2msr\") pod \"controller-f8648f98b-nbzls\" (UID: \"40e9d6d5-fd7e-4962-aace-e0fe711eb77d\") " pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.248371 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.446812 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d0c7592-e989-41cd-b1bb-4ec52e694973-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-ktl8z\" (UID: \"7d0c7592-e989-41cd-b1bb-4ec52e694973\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.447127 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics-certs\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq" Nov 29 04:25:49 crc kubenswrapper[4631]: E1129 04:25:49.447343 4631 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 29 04:25:49 crc kubenswrapper[4631]: E1129 04:25:49.447398 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics-certs podName:67c6fb55-67d7-4d86-8071-1a5e2d13e338 nodeName:}" failed. No retries permitted until 2025-11-29 04:25:50.447381049 +0000 UTC m=+877.511884563 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics-certs") pod "frr-k8s-zpqkq" (UID: "67c6fb55-67d7-4d86-8071-1a5e2d13e338") : secret "frr-k8s-certs-secret" not found Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.449800 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d0c7592-e989-41cd-b1bb-4ec52e694973-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-ktl8z\" (UID: \"7d0c7592-e989-41cd-b1bb-4ec52e694973\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.470206 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-nbzls"] Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.548431 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-metrics-certs\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.548508 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-memberlist\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7" Nov 29 04:25:49 crc kubenswrapper[4631]: E1129 04:25:49.548644 4631 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 29 04:25:49 crc kubenswrapper[4631]: E1129 04:25:49.548705 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-memberlist podName:b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d nodeName:}" failed. No retries permitted until 2025-11-29 04:25:50.548688291 +0000 UTC m=+877.613191815 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-memberlist") pod "speaker-rjtb7" (UID: "b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d") : secret "metallb-memberlist" not found Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.554190 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-metrics-certs\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7" Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.686799 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-nbzls" event={"ID":"40e9d6d5-fd7e-4962-aace-e0fe711eb77d","Type":"ContainerStarted","Data":"1cce68cdd4072f9cf0bd5d43553f340b949a54365782f01f85365ed0717cdae0"} Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.747449 4631 util.go:30] "No sandbox for pod can be found. 
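The nestedpendingoperations entries above show the volume manager backing off while a referenced secret does not exist yet: the first failed MountVolume.SetUp for each volume is retried after durationBeforeRetry 500ms, the next after 1s, and the mounts then succeed once the secrets appear. A doubling-backoff sketch consistent with those two logged delays (the 500ms start matches the log; the cap and helper are illustrative assumptions, not kubelet source):

package backoff

import "time"

const (
	initialDelay = 500 * time.Millisecond
	maxDelay     = 2 * time.Minute // assumed cap, for illustration only
)

// next returns the delay before the following retry: 500ms, 1s, 2s, ...
func next(d time.Duration) time.Duration {
	if d == 0 {
		return initialDelay
	}
	if d *= 2; d > maxDelay {
		return maxDelay
	}
	return d
}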
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z"
Nov 29 04:25:49 crc kubenswrapper[4631]: I1129 04:25:49.964507 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z"]
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.459685 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics-certs\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.464970 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/67c6fb55-67d7-4d86-8071-1a5e2d13e338-metrics-certs\") pod \"frr-k8s-zpqkq\" (UID: \"67c6fb55-67d7-4d86-8071-1a5e2d13e338\") " pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.561537 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-memberlist\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7"
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.566326 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d-memberlist\") pod \"speaker-rjtb7\" (UID: \"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d\") " pod="metallb-system/speaker-rjtb7"
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.641380 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-zpqkq"
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.722119 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-nbzls" event={"ID":"40e9d6d5-fd7e-4962-aace-e0fe711eb77d","Type":"ContainerStarted","Data":"c40f7be22ebba56a7ec7c5eb416d7f5d828c4708d2cfb04719983f825511f24c"}
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.722160 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-nbzls" event={"ID":"40e9d6d5-fd7e-4962-aace-e0fe711eb77d","Type":"ContainerStarted","Data":"4f54f0d2bdfbbd8db89683c7f7a0c8fc3d291e84e4c70b75fa40bb78710daa99"}
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.722308 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-nbzls"
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.730282 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-rjtb7"
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.730620 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z" event={"ID":"7d0c7592-e989-41cd-b1bb-4ec52e694973","Type":"ContainerStarted","Data":"d69e0a3efed5f33fd38f084f689e4baa1b9f8696a8fa8c19531e1a2ef2f13cf0"}
Nov 29 04:25:50 crc kubenswrapper[4631]: I1129 04:25:50.750330 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-nbzls" podStartSLOduration=2.75031243 podStartE2EDuration="2.75031243s" podCreationTimestamp="2025-11-29 04:25:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:25:50.747762891 +0000 UTC m=+877.812266405" watchObservedRunningTime="2025-11-29 04:25:50.75031243 +0000 UTC m=+877.814815944"
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.741573 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-rjtb7" event={"ID":"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d","Type":"ContainerStarted","Data":"b36c3a6027631cdaa2830a02c7529f745ec0abe8e9bf537b2302f403ea8a0c80"}
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.741854 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-rjtb7" event={"ID":"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d","Type":"ContainerStarted","Data":"a9aaf8ab5a4ce1567eba12439d416ddba3e2ffb577cb216eefdcdea4836da4a1"}
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.741864 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-rjtb7" event={"ID":"b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d","Type":"ContainerStarted","Data":"603e0c08068b6e52d688f04b598ad66530ae2be3168b04137f298cd2f5a389d8"}
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.742019 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-rjtb7"
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.745599 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerStarted","Data":"f86324241cec00e9653bf7287bb0ef2452586a34e70889912c36483f7a70f8f0"}
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.762790 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-rjtb7" podStartSLOduration=3.7627761680000003 podStartE2EDuration="3.762776168s" podCreationTimestamp="2025-11-29 04:25:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:25:51.758253304 +0000 UTC m=+878.822756818" watchObservedRunningTime="2025-11-29 04:25:51.762776168 +0000 UTC m=+878.827279682"
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.837707 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-m88lt"]
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.838943 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.846198 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m88lt"]
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.935704 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-catalog-content\") pod \"certified-operators-m88lt\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.935795 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjnbh\" (UniqueName: \"kubernetes.io/projected/d35026a2-bfe7-4a07-8e7c-973a19223966-kube-api-access-fjnbh\") pod \"certified-operators-m88lt\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:51 crc kubenswrapper[4631]: I1129 04:25:51.935856 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-utilities\") pod \"certified-operators-m88lt\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.037070 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-utilities\") pod \"certified-operators-m88lt\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.037130 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-catalog-content\") pod \"certified-operators-m88lt\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.037175 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjnbh\" (UniqueName: \"kubernetes.io/projected/d35026a2-bfe7-4a07-8e7c-973a19223966-kube-api-access-fjnbh\") pod \"certified-operators-m88lt\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.037626 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-catalog-content\") pod \"certified-operators-m88lt\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.037685 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-utilities\") pod \"certified-operators-m88lt\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.086177 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjnbh\" (UniqueName: \"kubernetes.io/projected/d35026a2-bfe7-4a07-8e7c-973a19223966-kube-api-access-fjnbh\") pod \"certified-operators-m88lt\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.152274 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m88lt"
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.564553 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m88lt"]
Nov 29 04:25:52 crc kubenswrapper[4631]: W1129 04:25:52.573666 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd35026a2_bfe7_4a07_8e7c_973a19223966.slice/crio-350402b8757de66a49176fc379866e71b13280cab44de751336ea21a749ec001 WatchSource:0}: Error finding container 350402b8757de66a49176fc379866e71b13280cab44de751336ea21a749ec001: Status 404 returned error can't find the container with id 350402b8757de66a49176fc379866e71b13280cab44de751336ea21a749ec001
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.758488 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m88lt" event={"ID":"d35026a2-bfe7-4a07-8e7c-973a19223966","Type":"ContainerStarted","Data":"06151aac35ac607f02feead6eed1aa809e76d7ae8031a473d8bf2c78b37d2628"}
Nov 29 04:25:52 crc kubenswrapper[4631]: I1129 04:25:52.758798 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m88lt" event={"ID":"d35026a2-bfe7-4a07-8e7c-973a19223966","Type":"ContainerStarted","Data":"350402b8757de66a49176fc379866e71b13280cab44de751336ea21a749ec001"}
Nov 29 04:25:53 crc kubenswrapper[4631]: I1129 04:25:53.774946 4631 generic.go:334] "Generic (PLEG): container finished" podID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerID="06151aac35ac607f02feead6eed1aa809e76d7ae8031a473d8bf2c78b37d2628" exitCode=0
Nov 29 04:25:53 crc kubenswrapper[4631]: I1129 04:25:53.775020 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m88lt" event={"ID":"d35026a2-bfe7-4a07-8e7c-973a19223966","Type":"ContainerDied","Data":"06151aac35ac607f02feead6eed1aa809e76d7ae8031a473d8bf2c78b37d2628"}
Nov 29 04:25:53 crc kubenswrapper[4631]: I1129 04:25:53.775283 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m88lt" event={"ID":"d35026a2-bfe7-4a07-8e7c-973a19223966","Type":"ContainerStarted","Data":"b2bc9c0e460e4a691b1104663b5dc81ad6d16efbcacfcdd070cac4ac8db06697"}
Nov 29 04:25:54 crc kubenswrapper[4631]: I1129 04:25:54.781627 4631 generic.go:334] "Generic (PLEG): container finished" podID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerID="b2bc9c0e460e4a691b1104663b5dc81ad6d16efbcacfcdd070cac4ac8db06697" exitCode=0
Nov 29 04:25:54 crc kubenswrapper[4631]: I1129 04:25:54.781668 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m88lt" event={"ID":"d35026a2-bfe7-4a07-8e7c-973a19223966","Type":"ContainerDied","Data":"b2bc9c0e460e4a691b1104663b5dc81ad6d16efbcacfcdd070cac4ac8db06697"}
Nov 29 04:25:58 crc kubenswrapper[4631]: I1129 04:25:58.805901 4631 generic.go:334] "Generic (PLEG): container finished" podID="67c6fb55-67d7-4d86-8071-1a5e2d13e338" containerID="2b28f055d68e1b47038f09b8f1f5e4d9d2983d516e47682e4348bfcabc7ec55d" exitCode=0
containerID="2b28f055d68e1b47038f09b8f1f5e4d9d2983d516e47682e4348bfcabc7ec55d" exitCode=0 Nov 29 04:25:58 crc kubenswrapper[4631]: I1129 04:25:58.805971 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerDied","Data":"2b28f055d68e1b47038f09b8f1f5e4d9d2983d516e47682e4348bfcabc7ec55d"} Nov 29 04:25:58 crc kubenswrapper[4631]: I1129 04:25:58.808357 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z" event={"ID":"7d0c7592-e989-41cd-b1bb-4ec52e694973","Type":"ContainerStarted","Data":"b96bb6ffe2edffc9b6ff6d9192a1ab01be5de5f1ef22563a5aecb83460f2c6f4"} Nov 29 04:25:58 crc kubenswrapper[4631]: I1129 04:25:58.808507 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z" Nov 29 04:25:58 crc kubenswrapper[4631]: I1129 04:25:58.811449 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m88lt" event={"ID":"d35026a2-bfe7-4a07-8e7c-973a19223966","Type":"ContainerStarted","Data":"5704c0aa5b6ff0a568527c95fcb30d23cded1fb2d7a43678d6b38c376a0a744f"} Nov 29 04:25:58 crc kubenswrapper[4631]: I1129 04:25:58.904627 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z" podStartSLOduration=2.6665301230000003 podStartE2EDuration="10.904603377s" podCreationTimestamp="2025-11-29 04:25:48 +0000 UTC" firstStartedPulling="2025-11-29 04:25:50.011718645 +0000 UTC m=+877.076222149" lastFinishedPulling="2025-11-29 04:25:58.249791889 +0000 UTC m=+885.314295403" observedRunningTime="2025-11-29 04:25:58.897317249 +0000 UTC m=+885.961820773" watchObservedRunningTime="2025-11-29 04:25:58.904603377 +0000 UTC m=+885.969106901" Nov 29 04:25:58 crc kubenswrapper[4631]: I1129 04:25:58.914852 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-m88lt" podStartSLOduration=2.568046929 podStartE2EDuration="7.914834002s" podCreationTimestamp="2025-11-29 04:25:51 +0000 UTC" firstStartedPulling="2025-11-29 04:25:52.770778923 +0000 UTC m=+879.835282437" lastFinishedPulling="2025-11-29 04:25:58.117566006 +0000 UTC m=+885.182069510" observedRunningTime="2025-11-29 04:25:58.912381556 +0000 UTC m=+885.976885090" watchObservedRunningTime="2025-11-29 04:25:58.914834002 +0000 UTC m=+885.979337516" Nov 29 04:25:59 crc kubenswrapper[4631]: I1129 04:25:59.256215 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-nbzls" Nov 29 04:25:59 crc kubenswrapper[4631]: I1129 04:25:59.820325 4631 generic.go:334] "Generic (PLEG): container finished" podID="67c6fb55-67d7-4d86-8071-1a5e2d13e338" containerID="5272e853e6c26023bc5b366bf268819f094740b1f8d50959c4682ddb390a73bc" exitCode=0 Nov 29 04:25:59 crc kubenswrapper[4631]: I1129 04:25:59.820453 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerDied","Data":"5272e853e6c26023bc5b366bf268819f094740b1f8d50959c4682ddb390a73bc"} Nov 29 04:26:00 crc kubenswrapper[4631]: I1129 04:26:00.756403 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-rjtb7" Nov 29 04:26:00 crc kubenswrapper[4631]: I1129 04:26:00.829955 4631 generic.go:334] "Generic (PLEG): container finished" 
podID="67c6fb55-67d7-4d86-8071-1a5e2d13e338" containerID="a98127613beeeacfe4f0091ba2cb99e20c8cda215535b3514c9ab25fa31e487c" exitCode=0 Nov 29 04:26:00 crc kubenswrapper[4631]: I1129 04:26:00.829998 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerDied","Data":"a98127613beeeacfe4f0091ba2cb99e20c8cda215535b3514c9ab25fa31e487c"} Nov 29 04:26:01 crc kubenswrapper[4631]: I1129 04:26:01.847106 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerStarted","Data":"11e3581f5e5d224b20d83a44e30b1e8d47d5628a09964befb2ae9fd98c52e9bc"} Nov 29 04:26:01 crc kubenswrapper[4631]: I1129 04:26:01.847384 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerStarted","Data":"9fe75c5ad063ab385ca98ee021fe30601ff970dd166c68a64b8f9e820249cc38"} Nov 29 04:26:01 crc kubenswrapper[4631]: I1129 04:26:01.847394 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerStarted","Data":"93f92b0b2d08b7a30cf80e72413887f555fa8e7098cf79277dc6c7cd47d7a60d"} Nov 29 04:26:01 crc kubenswrapper[4631]: I1129 04:26:01.847404 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerStarted","Data":"e87d0e25edbe597af6138fabd3ba1b7d8fadd0934d623442d937de987969ab88"} Nov 29 04:26:01 crc kubenswrapper[4631]: I1129 04:26:01.847412 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerStarted","Data":"972586d7335bab1846c0080228b63d8424562d9231d74c5928156da86ad089c7"} Nov 29 04:26:01 crc kubenswrapper[4631]: I1129 04:26:01.847421 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zpqkq" event={"ID":"67c6fb55-67d7-4d86-8071-1a5e2d13e338","Type":"ContainerStarted","Data":"d6d007c36fcc0f694eec654564407189da7744bfbbb89549f572a65d30a565c9"} Nov 29 04:26:02 crc kubenswrapper[4631]: I1129 04:26:02.152775 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-m88lt" Nov 29 04:26:02 crc kubenswrapper[4631]: I1129 04:26:02.152870 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-m88lt" Nov 29 04:26:02 crc kubenswrapper[4631]: I1129 04:26:02.195703 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-m88lt" Nov 29 04:26:02 crc kubenswrapper[4631]: I1129 04:26:02.215423 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-zpqkq" podStartSLOduration=6.807146682 podStartE2EDuration="14.215401301s" podCreationTimestamp="2025-11-29 04:25:48 +0000 UTC" firstStartedPulling="2025-11-29 04:25:50.890966507 +0000 UTC m=+877.955470021" lastFinishedPulling="2025-11-29 04:25:58.299221126 +0000 UTC m=+885.363724640" observedRunningTime="2025-11-29 04:26:01.877126286 +0000 UTC m=+888.941629800" watchObservedRunningTime="2025-11-29 04:26:02.215401301 +0000 UTC m=+889.279904825" Nov 29 04:26:02 crc kubenswrapper[4631]: I1129 04:26:02.851272 4631 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="metallb-system/frr-k8s-zpqkq" Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.523239 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-vbmj6"] Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.524789 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vbmj6" Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.527733 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.528505 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.529630 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-dwskw" Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.531664 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vbmj6"] Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.596099 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvcxr\" (UniqueName: \"kubernetes.io/projected/75387823-fa99-4043-884b-f9055bf62222-kube-api-access-kvcxr\") pod \"openstack-operator-index-vbmj6\" (UID: \"75387823-fa99-4043-884b-f9055bf62222\") " pod="openstack-operators/openstack-operator-index-vbmj6" Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.697660 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvcxr\" (UniqueName: \"kubernetes.io/projected/75387823-fa99-4043-884b-f9055bf62222-kube-api-access-kvcxr\") pod \"openstack-operator-index-vbmj6\" (UID: \"75387823-fa99-4043-884b-f9055bf62222\") " pod="openstack-operators/openstack-operator-index-vbmj6" Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.727627 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvcxr\" (UniqueName: \"kubernetes.io/projected/75387823-fa99-4043-884b-f9055bf62222-kube-api-access-kvcxr\") pod \"openstack-operator-index-vbmj6\" (UID: \"75387823-fa99-4043-884b-f9055bf62222\") " pod="openstack-operators/openstack-operator-index-vbmj6" Nov 29 04:26:03 crc kubenswrapper[4631]: I1129 04:26:03.861577 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-vbmj6" Nov 29 04:26:04 crc kubenswrapper[4631]: I1129 04:26:04.061646 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vbmj6"] Nov 29 04:26:04 crc kubenswrapper[4631]: I1129 04:26:04.879101 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vbmj6" event={"ID":"75387823-fa99-4043-884b-f9055bf62222","Type":"ContainerStarted","Data":"a6f2c8475570575f97649ecc0220d7e9df7fff25e37bc1e76978683b60ef83b7"} Nov 29 04:26:05 crc kubenswrapper[4631]: I1129 04:26:05.641872 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-zpqkq" Nov 29 04:26:05 crc kubenswrapper[4631]: I1129 04:26:05.684390 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-zpqkq" Nov 29 04:26:06 crc kubenswrapper[4631]: I1129 04:26:06.300488 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vbmj6"] Nov 29 04:26:06 crc kubenswrapper[4631]: I1129 04:26:06.894782 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-4wzxd"] Nov 29 04:26:06 crc kubenswrapper[4631]: I1129 04:26:06.899034 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-4wzxd" Nov 29 04:26:06 crc kubenswrapper[4631]: I1129 04:26:06.906785 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-4wzxd"] Nov 29 04:26:06 crc kubenswrapper[4631]: I1129 04:26:06.942520 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kv845\" (UniqueName: \"kubernetes.io/projected/d04d138a-44a6-4666-aa58-8a225d975235-kube-api-access-kv845\") pod \"openstack-operator-index-4wzxd\" (UID: \"d04d138a-44a6-4666-aa58-8a225d975235\") " pod="openstack-operators/openstack-operator-index-4wzxd" Nov 29 04:26:07 crc kubenswrapper[4631]: I1129 04:26:07.043267 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kv845\" (UniqueName: \"kubernetes.io/projected/d04d138a-44a6-4666-aa58-8a225d975235-kube-api-access-kv845\") pod \"openstack-operator-index-4wzxd\" (UID: \"d04d138a-44a6-4666-aa58-8a225d975235\") " pod="openstack-operators/openstack-operator-index-4wzxd" Nov 29 04:26:07 crc kubenswrapper[4631]: I1129 04:26:07.072609 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kv845\" (UniqueName: \"kubernetes.io/projected/d04d138a-44a6-4666-aa58-8a225d975235-kube-api-access-kv845\") pod \"openstack-operator-index-4wzxd\" (UID: \"d04d138a-44a6-4666-aa58-8a225d975235\") " pod="openstack-operators/openstack-operator-index-4wzxd" Nov 29 04:26:07 crc kubenswrapper[4631]: I1129 04:26:07.274826 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-4wzxd" Nov 29 04:26:09 crc kubenswrapper[4631]: I1129 04:26:09.763279 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-ktl8z" Nov 29 04:26:09 crc kubenswrapper[4631]: I1129 04:26:09.863243 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-4wzxd"] Nov 29 04:26:09 crc kubenswrapper[4631]: I1129 04:26:09.979342 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vbmj6" event={"ID":"75387823-fa99-4043-884b-f9055bf62222","Type":"ContainerStarted","Data":"37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139"} Nov 29 04:26:09 crc kubenswrapper[4631]: I1129 04:26:09.979398 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-vbmj6" podUID="75387823-fa99-4043-884b-f9055bf62222" containerName="registry-server" containerID="cri-o://37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139" gracePeriod=2 Nov 29 04:26:09 crc kubenswrapper[4631]: I1129 04:26:09.980669 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-4wzxd" event={"ID":"d04d138a-44a6-4666-aa58-8a225d975235","Type":"ContainerStarted","Data":"d7e7133517044dc784f57b2938d55ab89629bea8bae946b04bab91941ee21504"} Nov 29 04:26:09 crc kubenswrapper[4631]: I1129 04:26:09.997101 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-vbmj6" podStartSLOduration=1.397632516 podStartE2EDuration="6.997085123s" podCreationTimestamp="2025-11-29 04:26:03 +0000 UTC" firstStartedPulling="2025-11-29 04:26:04.069609868 +0000 UTC m=+891.134113382" lastFinishedPulling="2025-11-29 04:26:09.669062465 +0000 UTC m=+896.733565989" observedRunningTime="2025-11-29 04:26:09.992417325 +0000 UTC m=+897.056920829" watchObservedRunningTime="2025-11-29 04:26:09.997085123 +0000 UTC m=+897.061588637" Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.499216 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vbmj6" Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.601920 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvcxr\" (UniqueName: \"kubernetes.io/projected/75387823-fa99-4043-884b-f9055bf62222-kube-api-access-kvcxr\") pod \"75387823-fa99-4043-884b-f9055bf62222\" (UID: \"75387823-fa99-4043-884b-f9055bf62222\") " Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.611467 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75387823-fa99-4043-884b-f9055bf62222-kube-api-access-kvcxr" (OuterVolumeSpecName: "kube-api-access-kvcxr") pod "75387823-fa99-4043-884b-f9055bf62222" (UID: "75387823-fa99-4043-884b-f9055bf62222"). InnerVolumeSpecName "kube-api-access-kvcxr". 
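[annotation] The volume entries above always walk the same phases per volume: VerifyControllerAttachedVolume started, then MountVolume started, then MountVolume.SetUp succeeded on the way up; UnmountVolume started, UnmountVolume.TearDown succeeded, then Volume detached on the way down. A small sketch that extracts those transitions per volume name when scanning a log like this one (the phase strings are copied from the messages above; everything else is illustrative):

    import re
    from collections import defaultdict

    # Phase markers exactly as they appear in the reconciler_common.go /
    # operation_generator.go messages in this log, mount phases then unmount.
    PHASES = [
        "operationExecutor.VerifyControllerAttachedVolume started",
        "operationExecutor.MountVolume started",
        "MountVolume.SetUp succeeded",
        "operationExecutor.UnmountVolume started",
        "UnmountVolume.TearDown succeeded",
        "Volume detached",
    ]
    # Volume names appear both as \"name\" (escaped, inside structured messages)
    # and as "kubernetes.io/..." (plain, in TearDown lines); accept either form.
    VOL = re.compile(r'volume \\?"(?P<name>[^"\\]+)\\?"')

    def volume_phases(log_lines):
        """Map volume name -> ordered list of reconciler phases seen in the log."""
        seen = defaultdict(list)
        for line in log_lines:
            for phase in PHASES:
                if phase in line:
                    m = VOL.search(line)
                    if m:
                        seen[m.group("name")].append(phase)
                    break
        return seen

A volume whose final recorded phase is not "Volume detached" after its pod was deleted would be worth investigating; every teardown in this section completes.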
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.643664 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-zpqkq" Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.703056 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvcxr\" (UniqueName: \"kubernetes.io/projected/75387823-fa99-4043-884b-f9055bf62222-kube-api-access-kvcxr\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.987691 4631 generic.go:334] "Generic (PLEG): container finished" podID="75387823-fa99-4043-884b-f9055bf62222" containerID="37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139" exitCode=0 Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.987980 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vbmj6" event={"ID":"75387823-fa99-4043-884b-f9055bf62222","Type":"ContainerDied","Data":"37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139"} Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.988010 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vbmj6" event={"ID":"75387823-fa99-4043-884b-f9055bf62222","Type":"ContainerDied","Data":"a6f2c8475570575f97649ecc0220d7e9df7fff25e37bc1e76978683b60ef83b7"} Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.988030 4631 scope.go:117] "RemoveContainer" containerID="37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139" Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.988138 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vbmj6" Nov 29 04:26:10 crc kubenswrapper[4631]: I1129 04:26:10.991904 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-4wzxd" event={"ID":"d04d138a-44a6-4666-aa58-8a225d975235","Type":"ContainerStarted","Data":"b5945f69c93b4d8a60252bceaed8786d990253f8e63dce8636d5fde69b69f22b"} Nov 29 04:26:11 crc kubenswrapper[4631]: I1129 04:26:11.010660 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-4wzxd" podStartSLOduration=4.949912818 podStartE2EDuration="5.010624705s" podCreationTimestamp="2025-11-29 04:26:06 +0000 UTC" firstStartedPulling="2025-11-29 04:26:09.884520753 +0000 UTC m=+896.949024267" lastFinishedPulling="2025-11-29 04:26:09.94523264 +0000 UTC m=+897.009736154" observedRunningTime="2025-11-29 04:26:11.006454159 +0000 UTC m=+898.070957683" watchObservedRunningTime="2025-11-29 04:26:11.010624705 +0000 UTC m=+898.075128219" Nov 29 04:26:11 crc kubenswrapper[4631]: I1129 04:26:11.016176 4631 scope.go:117] "RemoveContainer" containerID="37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139" Nov 29 04:26:11 crc kubenswrapper[4631]: E1129 04:26:11.016739 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139\": container with ID starting with 37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139 not found: ID does not exist" containerID="37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139" Nov 29 04:26:11 crc kubenswrapper[4631]: I1129 04:26:11.016775 4631 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139"} err="failed to get container status \"37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139\": rpc error: code = NotFound desc = could not find container \"37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139\": container with ID starting with 37197e7db8e916e0038ca09d7cf5327510ae97df4b30dd2262016fad5e208139 not found: ID does not exist" Nov 29 04:26:11 crc kubenswrapper[4631]: I1129 04:26:11.024710 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vbmj6"] Nov 29 04:26:11 crc kubenswrapper[4631]: I1129 04:26:11.029072 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-vbmj6"] Nov 29 04:26:11 crc kubenswrapper[4631]: I1129 04:26:11.229043 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75387823-fa99-4043-884b-f9055bf62222" path="/var/lib/kubelet/pods/75387823-fa99-4043-884b-f9055bf62222/volumes" Nov 29 04:26:12 crc kubenswrapper[4631]: I1129 04:26:12.211924 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-m88lt" Nov 29 04:26:14 crc kubenswrapper[4631]: I1129 04:26:14.895073 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m88lt"] Nov 29 04:26:14 crc kubenswrapper[4631]: I1129 04:26:14.896186 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-m88lt" podUID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerName="registry-server" containerID="cri-o://5704c0aa5b6ff0a568527c95fcb30d23cded1fb2d7a43678d6b38c376a0a744f" gracePeriod=2 Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.019496 4631 generic.go:334] "Generic (PLEG): container finished" podID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerID="5704c0aa5b6ff0a568527c95fcb30d23cded1fb2d7a43678d6b38c376a0a744f" exitCode=0 Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.019699 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m88lt" event={"ID":"d35026a2-bfe7-4a07-8e7c-973a19223966","Type":"ContainerDied","Data":"5704c0aa5b6ff0a568527c95fcb30d23cded1fb2d7a43678d6b38c376a0a744f"} Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.257205 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m88lt" Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.285412 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-catalog-content\") pod \"d35026a2-bfe7-4a07-8e7c-973a19223966\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.285728 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjnbh\" (UniqueName: \"kubernetes.io/projected/d35026a2-bfe7-4a07-8e7c-973a19223966-kube-api-access-fjnbh\") pod \"d35026a2-bfe7-4a07-8e7c-973a19223966\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.285863 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-utilities\") pod \"d35026a2-bfe7-4a07-8e7c-973a19223966\" (UID: \"d35026a2-bfe7-4a07-8e7c-973a19223966\") " Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.286442 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-utilities" (OuterVolumeSpecName: "utilities") pod "d35026a2-bfe7-4a07-8e7c-973a19223966" (UID: "d35026a2-bfe7-4a07-8e7c-973a19223966"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.294610 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d35026a2-bfe7-4a07-8e7c-973a19223966-kube-api-access-fjnbh" (OuterVolumeSpecName: "kube-api-access-fjnbh") pod "d35026a2-bfe7-4a07-8e7c-973a19223966" (UID: "d35026a2-bfe7-4a07-8e7c-973a19223966"). InnerVolumeSpecName "kube-api-access-fjnbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.358916 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d35026a2-bfe7-4a07-8e7c-973a19223966" (UID: "d35026a2-bfe7-4a07-8e7c-973a19223966"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.386954 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.386994 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjnbh\" (UniqueName: \"kubernetes.io/projected/d35026a2-bfe7-4a07-8e7c-973a19223966-kube-api-access-fjnbh\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:15 crc kubenswrapper[4631]: I1129 04:26:15.387006 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35026a2-bfe7-4a07-8e7c-973a19223966-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:16 crc kubenswrapper[4631]: I1129 04:26:16.031239 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m88lt" event={"ID":"d35026a2-bfe7-4a07-8e7c-973a19223966","Type":"ContainerDied","Data":"350402b8757de66a49176fc379866e71b13280cab44de751336ea21a749ec001"} Nov 29 04:26:16 crc kubenswrapper[4631]: I1129 04:26:16.031307 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m88lt" Nov 29 04:26:16 crc kubenswrapper[4631]: I1129 04:26:16.031325 4631 scope.go:117] "RemoveContainer" containerID="5704c0aa5b6ff0a568527c95fcb30d23cded1fb2d7a43678d6b38c376a0a744f" Nov 29 04:26:16 crc kubenswrapper[4631]: I1129 04:26:16.067750 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m88lt"] Nov 29 04:26:16 crc kubenswrapper[4631]: I1129 04:26:16.072355 4631 scope.go:117] "RemoveContainer" containerID="b2bc9c0e460e4a691b1104663b5dc81ad6d16efbcacfcdd070cac4ac8db06697" Nov 29 04:26:16 crc kubenswrapper[4631]: I1129 04:26:16.079872 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-m88lt"] Nov 29 04:26:16 crc kubenswrapper[4631]: I1129 04:26:16.100157 4631 scope.go:117] "RemoveContainer" containerID="06151aac35ac607f02feead6eed1aa809e76d7ae8031a473d8bf2c78b37d2628" Nov 29 04:26:17 crc kubenswrapper[4631]: I1129 04:26:17.226241 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d35026a2-bfe7-4a07-8e7c-973a19223966" path="/var/lib/kubelet/pods/d35026a2-bfe7-4a07-8e7c-973a19223966/volumes" Nov 29 04:26:17 crc kubenswrapper[4631]: I1129 04:26:17.276469 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-4wzxd" Nov 29 04:26:17 crc kubenswrapper[4631]: I1129 04:26:17.276553 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-4wzxd" Nov 29 04:26:17 crc kubenswrapper[4631]: I1129 04:26:17.324389 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-4wzxd" Nov 29 04:26:18 crc kubenswrapper[4631]: I1129 04:26:18.088722 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-4wzxd" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.953091 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922"] Nov 29 04:26:19 crc kubenswrapper[4631]: E1129 
04:26:19.953616 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerName="extract-content" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.953628 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerName="extract-content" Nov 29 04:26:19 crc kubenswrapper[4631]: E1129 04:26:19.953647 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75387823-fa99-4043-884b-f9055bf62222" containerName="registry-server" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.953653 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="75387823-fa99-4043-884b-f9055bf62222" containerName="registry-server" Nov 29 04:26:19 crc kubenswrapper[4631]: E1129 04:26:19.953661 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerName="extract-utilities" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.953667 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerName="extract-utilities" Nov 29 04:26:19 crc kubenswrapper[4631]: E1129 04:26:19.953674 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerName="registry-server" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.953679 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerName="registry-server" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.953769 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="75387823-fa99-4043-884b-f9055bf62222" containerName="registry-server" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.953779 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="d35026a2-bfe7-4a07-8e7c-973a19223966" containerName="registry-server" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.954542 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.956963 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-x2bz6" Nov 29 04:26:19 crc kubenswrapper[4631]: I1129 04:26:19.963509 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922"] Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.060102 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-bundle\") pod \"76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.060140 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-util\") pod \"76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.060159 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vqnc\" (UniqueName: \"kubernetes.io/projected/0cb55e1f-fe62-4278-8b61-a5836e3a6946-kube-api-access-2vqnc\") pod \"76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.161623 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-bundle\") pod \"76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.161658 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vqnc\" (UniqueName: \"kubernetes.io/projected/0cb55e1f-fe62-4278-8b61-a5836e3a6946-kube-api-access-2vqnc\") pod \"76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.161676 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-util\") pod \"76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.162043 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-util\") pod \"76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.162233 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-bundle\") pod \"76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.218195 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vqnc\" (UniqueName: \"kubernetes.io/projected/0cb55e1f-fe62-4278-8b61-a5836e3a6946-kube-api-access-2vqnc\") pod \"76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.269309 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:20 crc kubenswrapper[4631]: I1129 04:26:20.676833 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922"] Nov 29 04:26:20 crc kubenswrapper[4631]: W1129 04:26:20.687872 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0cb55e1f_fe62_4278_8b61_a5836e3a6946.slice/crio-236fddb8582d9ba2edd0d17da1cdaf20677f6c812b634ec88d23fcc4647ad5cb WatchSource:0}: Error finding container 236fddb8582d9ba2edd0d17da1cdaf20677f6c812b634ec88d23fcc4647ad5cb: Status 404 returned error can't find the container with id 236fddb8582d9ba2edd0d17da1cdaf20677f6c812b634ec88d23fcc4647ad5cb Nov 29 04:26:21 crc kubenswrapper[4631]: I1129 04:26:21.067913 4631 generic.go:334] "Generic (PLEG): container finished" podID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerID="80657ff3fb19b73c8744542f688bad15637bc6df60c8362f3ca9ccb55cfa5f81" exitCode=0 Nov 29 04:26:21 crc kubenswrapper[4631]: I1129 04:26:21.068143 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" event={"ID":"0cb55e1f-fe62-4278-8b61-a5836e3a6946","Type":"ContainerDied","Data":"80657ff3fb19b73c8744542f688bad15637bc6df60c8362f3ca9ccb55cfa5f81"} Nov 29 04:26:21 crc kubenswrapper[4631]: I1129 04:26:21.068260 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" event={"ID":"0cb55e1f-fe62-4278-8b61-a5836e3a6946","Type":"ContainerStarted","Data":"236fddb8582d9ba2edd0d17da1cdaf20677f6c812b634ec88d23fcc4647ad5cb"} Nov 29 04:26:22 crc kubenswrapper[4631]: I1129 04:26:22.078213 4631 generic.go:334] "Generic (PLEG): container finished" podID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerID="f1364a5c5c6e07883830d69940434cb7012802e998db16de87f1774ef867298c" exitCode=0 Nov 29 04:26:22 crc kubenswrapper[4631]: I1129 04:26:22.078366 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" event={"ID":"0cb55e1f-fe62-4278-8b61-a5836e3a6946","Type":"ContainerDied","Data":"f1364a5c5c6e07883830d69940434cb7012802e998db16de87f1774ef867298c"} Nov 29 04:26:23 crc kubenswrapper[4631]: I1129 04:26:23.088220 4631 generic.go:334] "Generic (PLEG): container finished" podID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerID="73994d81967138e8e6df13fb136e747ca4ed28e273713a99aedec3f8c4830b0a" exitCode=0 Nov 29 04:26:23 crc kubenswrapper[4631]: I1129 04:26:23.088317 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" event={"ID":"0cb55e1f-fe62-4278-8b61-a5836e3a6946","Type":"ContainerDied","Data":"73994d81967138e8e6df13fb136e747ca4ed28e273713a99aedec3f8c4830b0a"} Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.427887 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.524222 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vqnc\" (UniqueName: \"kubernetes.io/projected/0cb55e1f-fe62-4278-8b61-a5836e3a6946-kube-api-access-2vqnc\") pod \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.524306 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-bundle\") pod \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.524396 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-util\") pod \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\" (UID: \"0cb55e1f-fe62-4278-8b61-a5836e3a6946\") " Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.525185 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-bundle" (OuterVolumeSpecName: "bundle") pod "0cb55e1f-fe62-4278-8b61-a5836e3a6946" (UID: "0cb55e1f-fe62-4278-8b61-a5836e3a6946"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.530200 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cb55e1f-fe62-4278-8b61-a5836e3a6946-kube-api-access-2vqnc" (OuterVolumeSpecName: "kube-api-access-2vqnc") pod "0cb55e1f-fe62-4278-8b61-a5836e3a6946" (UID: "0cb55e1f-fe62-4278-8b61-a5836e3a6946"). InnerVolumeSpecName "kube-api-access-2vqnc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.537165 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-util" (OuterVolumeSpecName: "util") pod "0cb55e1f-fe62-4278-8b61-a5836e3a6946" (UID: "0cb55e1f-fe62-4278-8b61-a5836e3a6946"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.625981 4631 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-util\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.626029 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vqnc\" (UniqueName: \"kubernetes.io/projected/0cb55e1f-fe62-4278-8b61-a5836e3a6946-kube-api-access-2vqnc\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:24 crc kubenswrapper[4631]: I1129 04:26:24.626049 4631 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0cb55e1f-fe62-4278-8b61-a5836e3a6946-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:25 crc kubenswrapper[4631]: I1129 04:26:25.106726 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" event={"ID":"0cb55e1f-fe62-4278-8b61-a5836e3a6946","Type":"ContainerDied","Data":"236fddb8582d9ba2edd0d17da1cdaf20677f6c812b634ec88d23fcc4647ad5cb"} Nov 29 04:26:25 crc kubenswrapper[4631]: I1129 04:26:25.106781 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="236fddb8582d9ba2edd0d17da1cdaf20677f6c812b634ec88d23fcc4647ad5cb" Nov 29 04:26:25 crc kubenswrapper[4631]: I1129 04:26:25.106895 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.505269 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2"] Nov 29 04:26:28 crc kubenswrapper[4631]: E1129 04:26:28.507199 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerName="util" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.507244 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerName="util" Nov 29 04:26:28 crc kubenswrapper[4631]: E1129 04:26:28.507261 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerName="pull" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.507268 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerName="pull" Nov 29 04:26:28 crc kubenswrapper[4631]: E1129 04:26:28.507280 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerName="extract" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.507287 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerName="extract" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.507537 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cb55e1f-fe62-4278-8b61-a5836e3a6946" containerName="extract" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.508129 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.518749 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-dflbh" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.527093 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2"] Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.582066 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jzwr\" (UniqueName: \"kubernetes.io/projected/d0f57542-df49-4195-91b5-1fc784cba518-kube-api-access-2jzwr\") pod \"openstack-operator-controller-operator-5478ff79b4-nggr2\" (UID: \"d0f57542-df49-4195-91b5-1fc784cba518\") " pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.683399 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jzwr\" (UniqueName: \"kubernetes.io/projected/d0f57542-df49-4195-91b5-1fc784cba518-kube-api-access-2jzwr\") pod \"openstack-operator-controller-operator-5478ff79b4-nggr2\" (UID: \"d0f57542-df49-4195-91b5-1fc784cba518\") " pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.705638 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jzwr\" (UniqueName: \"kubernetes.io/projected/d0f57542-df49-4195-91b5-1fc784cba518-kube-api-access-2jzwr\") pod \"openstack-operator-controller-operator-5478ff79b4-nggr2\" (UID: \"d0f57542-df49-4195-91b5-1fc784cba518\") " pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" Nov 29 04:26:28 crc kubenswrapper[4631]: I1129 04:26:28.831538 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" Nov 29 04:26:29 crc kubenswrapper[4631]: I1129 04:26:29.269191 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2"] Nov 29 04:26:29 crc kubenswrapper[4631]: W1129 04:26:29.271495 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0f57542_df49_4195_91b5_1fc784cba518.slice/crio-85c694070137ff9db9ae2dc4686a6391aa3b1b1d448a3423d29ce20d66dabda0 WatchSource:0}: Error finding container 85c694070137ff9db9ae2dc4686a6391aa3b1b1d448a3423d29ce20d66dabda0: Status 404 returned error can't find the container with id 85c694070137ff9db9ae2dc4686a6391aa3b1b1d448a3423d29ce20d66dabda0 Nov 29 04:26:30 crc kubenswrapper[4631]: I1129 04:26:30.136972 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" event={"ID":"d0f57542-df49-4195-91b5-1fc784cba518","Type":"ContainerStarted","Data":"85c694070137ff9db9ae2dc4686a6391aa3b1b1d448a3423d29ce20d66dabda0"} Nov 29 04:26:30 crc kubenswrapper[4631]: I1129 04:26:30.926026 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kh49q"] Nov 29 04:26:30 crc kubenswrapper[4631]: I1129 04:26:30.929483 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:30 crc kubenswrapper[4631]: I1129 04:26:30.951897 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh49q"] Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.116451 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-catalog-content\") pod \"redhat-marketplace-kh49q\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.116508 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd4zl\" (UniqueName: \"kubernetes.io/projected/17a1663b-bb90-4718-891e-2a3a1c33cbb1-kube-api-access-hd4zl\") pod \"redhat-marketplace-kh49q\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.116545 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-utilities\") pod \"redhat-marketplace-kh49q\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.217128 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-utilities\") pod \"redhat-marketplace-kh49q\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.217212 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-catalog-content\") pod \"redhat-marketplace-kh49q\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.217248 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd4zl\" (UniqueName: \"kubernetes.io/projected/17a1663b-bb90-4718-891e-2a3a1c33cbb1-kube-api-access-hd4zl\") pod \"redhat-marketplace-kh49q\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.217751 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-catalog-content\") pod \"redhat-marketplace-kh49q\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.217773 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-utilities\") pod \"redhat-marketplace-kh49q\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.243699 4631 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hd4zl\" (UniqueName: \"kubernetes.io/projected/17a1663b-bb90-4718-891e-2a3a1c33cbb1-kube-api-access-hd4zl\") pod \"redhat-marketplace-kh49q\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:31 crc kubenswrapper[4631]: I1129 04:26:31.249885 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:35 crc kubenswrapper[4631]: I1129 04:26:35.990844 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh49q"] Nov 29 04:26:36 crc kubenswrapper[4631]: I1129 04:26:36.187851 4631 generic.go:334] "Generic (PLEG): container finished" podID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerID="0b0805a0364872a829f93a492f44abd74d74aebd98e30dbc4652522bd843af53" exitCode=0 Nov 29 04:26:36 crc kubenswrapper[4631]: I1129 04:26:36.187940 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh49q" event={"ID":"17a1663b-bb90-4718-891e-2a3a1c33cbb1","Type":"ContainerDied","Data":"0b0805a0364872a829f93a492f44abd74d74aebd98e30dbc4652522bd843af53"} Nov 29 04:26:36 crc kubenswrapper[4631]: I1129 04:26:36.187971 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh49q" event={"ID":"17a1663b-bb90-4718-891e-2a3a1c33cbb1","Type":"ContainerStarted","Data":"541419b5e3bf2ab593339e26d11b477fc88c25cd1615f724999a9853005bbfb4"} Nov 29 04:26:36 crc kubenswrapper[4631]: I1129 04:26:36.190858 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" event={"ID":"d0f57542-df49-4195-91b5-1fc784cba518","Type":"ContainerStarted","Data":"1f6b6036fa6f6979a486bfb9b589e79053102c8a9fedd37c4791e7a89b8fba3e"} Nov 29 04:26:36 crc kubenswrapper[4631]: I1129 04:26:36.191147 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" Nov 29 04:26:36 crc kubenswrapper[4631]: I1129 04:26:36.237976 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" podStartSLOduration=1.7284727069999999 podStartE2EDuration="8.237954865s" podCreationTimestamp="2025-11-29 04:26:28 +0000 UTC" firstStartedPulling="2025-11-29 04:26:29.272656988 +0000 UTC m=+916.337160502" lastFinishedPulling="2025-11-29 04:26:35.782139146 +0000 UTC m=+922.846642660" observedRunningTime="2025-11-29 04:26:36.231718441 +0000 UTC m=+923.296221955" watchObservedRunningTime="2025-11-29 04:26:36.237954865 +0000 UTC m=+923.302458389" Nov 29 04:26:38 crc kubenswrapper[4631]: I1129 04:26:38.206381 4631 generic.go:334] "Generic (PLEG): container finished" podID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerID="05450d02fad38c2d6db487213337ca161e49149683838e225cc9a630e8189950" exitCode=0 Nov 29 04:26:38 crc kubenswrapper[4631]: I1129 04:26:38.206481 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh49q" event={"ID":"17a1663b-bb90-4718-891e-2a3a1c33cbb1","Type":"ContainerDied","Data":"05450d02fad38c2d6db487213337ca161e49149683838e225cc9a630e8189950"} Nov 29 04:26:39 crc kubenswrapper[4631]: I1129 04:26:39.226591 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh49q" 
event={"ID":"17a1663b-bb90-4718-891e-2a3a1c33cbb1","Type":"ContainerStarted","Data":"66f2f4e560a7ca8dd9963678c3f4fa8ea572ec2e3f14f1436c8a5b4081e81c22"} Nov 29 04:26:39 crc kubenswrapper[4631]: I1129 04:26:39.246897 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kh49q" podStartSLOduration=6.498029999 podStartE2EDuration="9.246879422s" podCreationTimestamp="2025-11-29 04:26:30 +0000 UTC" firstStartedPulling="2025-11-29 04:26:36.190076553 +0000 UTC m=+923.254580057" lastFinishedPulling="2025-11-29 04:26:38.938925966 +0000 UTC m=+926.003429480" observedRunningTime="2025-11-29 04:26:39.245827108 +0000 UTC m=+926.310330612" watchObservedRunningTime="2025-11-29 04:26:39.246879422 +0000 UTC m=+926.311382936" Nov 29 04:26:41 crc kubenswrapper[4631]: I1129 04:26:41.252897 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:41 crc kubenswrapper[4631]: I1129 04:26:41.254689 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:41 crc kubenswrapper[4631]: I1129 04:26:41.316272 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:48 crc kubenswrapper[4631]: I1129 04:26:48.834729 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5478ff79b4-nggr2" Nov 29 04:26:51 crc kubenswrapper[4631]: I1129 04:26:51.331668 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:51 crc kubenswrapper[4631]: I1129 04:26:51.393479 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh49q"] Nov 29 04:26:52 crc kubenswrapper[4631]: I1129 04:26:52.324107 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kh49q" podUID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerName="registry-server" containerID="cri-o://66f2f4e560a7ca8dd9963678c3f4fa8ea572ec2e3f14f1436c8a5b4081e81c22" gracePeriod=2 Nov 29 04:26:53 crc kubenswrapper[4631]: I1129 04:26:53.336099 4631 generic.go:334] "Generic (PLEG): container finished" podID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerID="66f2f4e560a7ca8dd9963678c3f4fa8ea572ec2e3f14f1436c8a5b4081e81c22" exitCode=0 Nov 29 04:26:53 crc kubenswrapper[4631]: I1129 04:26:53.336161 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh49q" event={"ID":"17a1663b-bb90-4718-891e-2a3a1c33cbb1","Type":"ContainerDied","Data":"66f2f4e560a7ca8dd9963678c3f4fa8ea572ec2e3f14f1436c8a5b4081e81c22"} Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.547550 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.567938 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hd4zl\" (UniqueName: \"kubernetes.io/projected/17a1663b-bb90-4718-891e-2a3a1c33cbb1-kube-api-access-hd4zl\") pod \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.568180 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-utilities\") pod \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.568267 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-catalog-content\") pod \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\" (UID: \"17a1663b-bb90-4718-891e-2a3a1c33cbb1\") " Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.570151 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-utilities" (OuterVolumeSpecName: "utilities") pod "17a1663b-bb90-4718-891e-2a3a1c33cbb1" (UID: "17a1663b-bb90-4718-891e-2a3a1c33cbb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.583488 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17a1663b-bb90-4718-891e-2a3a1c33cbb1-kube-api-access-hd4zl" (OuterVolumeSpecName: "kube-api-access-hd4zl") pod "17a1663b-bb90-4718-891e-2a3a1c33cbb1" (UID: "17a1663b-bb90-4718-891e-2a3a1c33cbb1"). InnerVolumeSpecName "kube-api-access-hd4zl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.591004 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "17a1663b-bb90-4718-891e-2a3a1c33cbb1" (UID: "17a1663b-bb90-4718-891e-2a3a1c33cbb1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.671706 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.671740 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hd4zl\" (UniqueName: \"kubernetes.io/projected/17a1663b-bb90-4718-891e-2a3a1c33cbb1-kube-api-access-hd4zl\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:54 crc kubenswrapper[4631]: I1129 04:26:54.671752 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17a1663b-bb90-4718-891e-2a3a1c33cbb1-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:26:55 crc kubenswrapper[4631]: I1129 04:26:55.352469 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kh49q" event={"ID":"17a1663b-bb90-4718-891e-2a3a1c33cbb1","Type":"ContainerDied","Data":"541419b5e3bf2ab593339e26d11b477fc88c25cd1615f724999a9853005bbfb4"} Nov 29 04:26:55 crc kubenswrapper[4631]: I1129 04:26:55.352514 4631 scope.go:117] "RemoveContainer" containerID="66f2f4e560a7ca8dd9963678c3f4fa8ea572ec2e3f14f1436c8a5b4081e81c22" Nov 29 04:26:55 crc kubenswrapper[4631]: I1129 04:26:55.352610 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kh49q" Nov 29 04:26:55 crc kubenswrapper[4631]: I1129 04:26:55.367072 4631 scope.go:117] "RemoveContainer" containerID="05450d02fad38c2d6db487213337ca161e49149683838e225cc9a630e8189950" Nov 29 04:26:55 crc kubenswrapper[4631]: I1129 04:26:55.379346 4631 scope.go:117] "RemoveContainer" containerID="0b0805a0364872a829f93a492f44abd74d74aebd98e30dbc4652522bd843af53" Nov 29 04:26:55 crc kubenswrapper[4631]: I1129 04:26:55.380375 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh49q"] Nov 29 04:26:55 crc kubenswrapper[4631]: I1129 04:26:55.384857 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kh49q"] Nov 29 04:26:57 crc kubenswrapper[4631]: I1129 04:26:57.224849 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" path="/var/lib/kubelet/pods/17a1663b-bb90-4718-891e-2a3a1c33cbb1/volumes" Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.570678 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9"] Nov 29 04:27:07 crc kubenswrapper[4631]: E1129 04:27:07.571383 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerName="registry-server" Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.571397 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerName="registry-server" Nov 29 04:27:07 crc kubenswrapper[4631]: E1129 04:27:07.571413 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerName="extract-content" Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.571421 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerName="extract-content" Nov 29 04:27:07 crc kubenswrapper[4631]: E1129 
Nov 29 04:27:07 crc kubenswrapper[4631]: E1129 04:27:07.571434 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerName="extract-utilities"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.571443 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerName="extract-utilities"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.571565 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="17a1663b-bb90-4718-891e-2a3a1c33cbb1" containerName="registry-server"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.572206 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.574749 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-bkwt2"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.579832 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.598600 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.599549 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.601884 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-8blqp"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.619663 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.661648 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nx78h\" (UniqueName: \"kubernetes.io/projected/51f70cd4-a679-426f-9467-1702bb980ada-kube-api-access-nx78h\") pod \"barbican-operator-controller-manager-7d9dfd778-dp4b9\" (UID: \"51f70cd4-a679-426f-9467-1702bb980ada\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.661713 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckwvr\" (UniqueName: \"kubernetes.io/projected/75e40b24-8291-44fe-bd37-97d493e2c136-kube-api-access-ckwvr\") pod \"cinder-operator-controller-manager-859b6ccc6-q4tvc\" (UID: \"75e40b24-8291-44fe-bd37-97d493e2c136\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.695389 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.696410 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.698745 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-26ss6"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.702039 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.706717 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.707700 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.710188 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-6mdnc"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.711375 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.748851 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.749963 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.755100 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-qsqht"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.757237 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.758192 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.762828 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-d4xr5"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.763618 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49s9g\" (UniqueName: \"kubernetes.io/projected/7214fe12-0140-464c-a856-b1b5482bb635-kube-api-access-49s9g\") pod \"glance-operator-controller-manager-668d9c48b9-md78v\" (UID: \"7214fe12-0140-464c-a856-b1b5482bb635\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.763660 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpq78\" (UniqueName: \"kubernetes.io/projected/fc3a4db2-6980-4bc4-aa20-8340eecc513e-kube-api-access-cpq78\") pod \"designate-operator-controller-manager-78b4bc895b-kzfsd\" (UID: \"fc3a4db2-6980-4bc4-aa20-8340eecc513e\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.763728 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nx78h\" (UniqueName: \"kubernetes.io/projected/51f70cd4-a679-426f-9467-1702bb980ada-kube-api-access-nx78h\") pod \"barbican-operator-controller-manager-7d9dfd778-dp4b9\" (UID: \"51f70cd4-a679-426f-9467-1702bb980ada\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.763747 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckwvr\" (UniqueName: \"kubernetes.io/projected/75e40b24-8291-44fe-bd37-97d493e2c136-kube-api-access-ckwvr\") pod \"cinder-operator-controller-manager-859b6ccc6-q4tvc\" (UID: \"75e40b24-8291-44fe-bd37-97d493e2c136\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.779756 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.795557 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.797220 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.798291 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.802503 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.812202 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckwvr\" (UniqueName: \"kubernetes.io/projected/75e40b24-8291-44fe-bd37-97d493e2c136-kube-api-access-ckwvr\") pod \"cinder-operator-controller-manager-859b6ccc6-q4tvc\" (UID: \"75e40b24-8291-44fe-bd37-97d493e2c136\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.812623 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.813470 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-hvkcd"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.815924 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nx78h\" (UniqueName: \"kubernetes.io/projected/51f70cd4-a679-426f-9467-1702bb980ada-kube-api-access-nx78h\") pod \"barbican-operator-controller-manager-7d9dfd778-dp4b9\" (UID: \"51f70cd4-a679-426f-9467-1702bb980ada\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.864774 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7chkt\" (UniqueName: \"kubernetes.io/projected/5fc71d02-38a2-4998-8cab-e334a10fcd5c-kube-api-access-7chkt\") pod \"heat-operator-controller-manager-5f64f6f8bb-h8x8k\" (UID: \"5fc71d02-38a2-4998-8cab-e334a10fcd5c\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.864829 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49vzx\" (UniqueName: \"kubernetes.io/projected/7e78b781-84b7-4915-837a-ed1a45d1201e-kube-api-access-49vzx\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.864862 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.864893 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj4wk\" (UniqueName: \"kubernetes.io/projected/1ec069eb-26b3-408c-a4ba-118d01436ecd-kube-api-access-vj4wk\") pod \"horizon-operator-controller-manager-68c6d99b8f-5jzhg\" (UID: \"1ec069eb-26b3-408c-a4ba-118d01436ecd\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.864935 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49s9g\" (UniqueName: \"kubernetes.io/projected/7214fe12-0140-464c-a856-b1b5482bb635-kube-api-access-49s9g\") pod \"glance-operator-controller-manager-668d9c48b9-md78v\" (UID: \"7214fe12-0140-464c-a856-b1b5482bb635\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.864961 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpq78\" (UniqueName: \"kubernetes.io/projected/fc3a4db2-6980-4bc4-aa20-8340eecc513e-kube-api-access-cpq78\") pod \"designate-operator-controller-manager-78b4bc895b-kzfsd\" (UID: \"fc3a4db2-6980-4bc4-aa20-8340eecc513e\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.878455 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.879485 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.888842 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-8jwqf"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.894372 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.895078 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.895268 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.897693 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-qq4wm"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.903136 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.904059 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.906773 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-8gfr2"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.921241 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.925932 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpq78\" (UniqueName: \"kubernetes.io/projected/fc3a4db2-6980-4bc4-aa20-8340eecc513e-kube-api-access-cpq78\") pod \"designate-operator-controller-manager-78b4bc895b-kzfsd\" (UID: \"fc3a4db2-6980-4bc4-aa20-8340eecc513e\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.940442 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.949435 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49s9g\" (UniqueName: \"kubernetes.io/projected/7214fe12-0140-464c-a856-b1b5482bb635-kube-api-access-49s9g\") pod \"glance-operator-controller-manager-668d9c48b9-md78v\" (UID: \"7214fe12-0140-464c-a856-b1b5482bb635\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.964384 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.965919 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z449w\" (UniqueName: \"kubernetes.io/projected/46dcc222-f54d-4ddd-bc12-71fd2cfc989c-kube-api-access-z449w\") pod \"manila-operator-controller-manager-6546668bfd-96kdd\" (UID: \"46dcc222-f54d-4ddd-bc12-71fd2cfc989c\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.965950 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.965976 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj4wk\" (UniqueName: \"kubernetes.io/projected/1ec069eb-26b3-408c-a4ba-118d01436ecd-kube-api-access-vj4wk\") pod \"horizon-operator-controller-manager-68c6d99b8f-5jzhg\" (UID: \"1ec069eb-26b3-408c-a4ba-118d01436ecd\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.966006 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhxks\" (UniqueName: \"kubernetes.io/projected/a0087618-94aa-4b5f-a590-9e976a84cbbf-kube-api-access-xhxks\") pod \"keystone-operator-controller-manager-6c69d4788d-4q485\" (UID: \"a0087618-94aa-4b5f-a590-9e976a84cbbf\") " pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.966033 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6b7k\" (UniqueName: \"kubernetes.io/projected/53a59934-39b4-4b0b-bf3d-da06f41ccf7f-kube-api-access-t6b7k\") pod \"ironic-operator-controller-manager-6c548fd776-qp8p2\" (UID: \"53a59934-39b4-4b0b-bf3d-da06f41ccf7f\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.966069 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7chkt\" (UniqueName: \"kubernetes.io/projected/5fc71d02-38a2-4998-8cab-e334a10fcd5c-kube-api-access-7chkt\") pod \"heat-operator-controller-manager-5f64f6f8bb-h8x8k\" (UID: \"5fc71d02-38a2-4998-8cab-e334a10fcd5c\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.966105 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49vzx\" (UniqueName: \"kubernetes.io/projected/7e78b781-84b7-4915-837a-ed1a45d1201e-kube-api-access-49vzx\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:07 crc kubenswrapper[4631]: E1129 04:27:07.966103 4631 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 29 04:27:07 crc kubenswrapper[4631]: E1129 04:27:07.966453 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert podName:7e78b781-84b7-4915-837a-ed1a45d1201e nodeName:}" failed. No retries permitted until 2025-11-29 04:27:08.466433508 +0000 UTC m=+955.530937022 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert") pod "infra-operator-controller-manager-57548d458d-zhzc9" (UID: "7e78b781-84b7-4915-837a-ed1a45d1201e") : secret "infra-operator-webhook-server-cert" not found
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.977160 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.978096 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.980390 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-p2fh4"
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.996401 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68"]
Nov 29 04:27:07 crc kubenswrapper[4631]: I1129 04:27:07.997454 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:07.999548 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7chkt\" (UniqueName: \"kubernetes.io/projected/5fc71d02-38a2-4998-8cab-e334a10fcd5c-kube-api-access-7chkt\") pod \"heat-operator-controller-manager-5f64f6f8bb-h8x8k\" (UID: \"5fc71d02-38a2-4998-8cab-e334a10fcd5c\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.003164 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.008832 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.017354 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj4wk\" (UniqueName: \"kubernetes.io/projected/1ec069eb-26b3-408c-a4ba-118d01436ecd-kube-api-access-vj4wk\") pod \"horizon-operator-controller-manager-68c6d99b8f-5jzhg\" (UID: \"1ec069eb-26b3-408c-a4ba-118d01436ecd\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.019837 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-qm568"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.020911 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.023778 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-vr8lp"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.024167 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.023838 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-lnx9q"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.027877 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49vzx\" (UniqueName: \"kubernetes.io/projected/7e78b781-84b7-4915-837a-ed1a45d1201e-kube-api-access-49vzx\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.029634 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-7gsz2"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.032281 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.034890 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.061176 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.071928 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dhgk\" (UniqueName: \"kubernetes.io/projected/f4f6e611-da9a-42cb-99f8-59b9784b2671-kube-api-access-6dhgk\") pod \"mariadb-operator-controller-manager-56bbcc9d85-plz68\" (UID: \"f4f6e611-da9a-42cb-99f8-59b9784b2671\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.071973 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z449w\" (UniqueName: \"kubernetes.io/projected/46dcc222-f54d-4ddd-bc12-71fd2cfc989c-kube-api-access-z449w\") pod \"manila-operator-controller-manager-6546668bfd-96kdd\" (UID: \"46dcc222-f54d-4ddd-bc12-71fd2cfc989c\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.072016 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpk44\" (UniqueName: \"kubernetes.io/projected/01ca8f91-4e45-4bb2-a44f-a17d6701e529-kube-api-access-fpk44\") pod \"nova-operator-controller-manager-697bc559fc-6mrwx\" (UID: \"01ca8f91-4e45-4bb2-a44f-a17d6701e529\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.072043 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb76x\" (UniqueName: \"kubernetes.io/projected/9429868a-7e85-4c45-a3ff-e05af34c9854-kube-api-access-rb76x\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-lp8wb\" (UID: \"9429868a-7e85-4c45-a3ff-e05af34c9854\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.072090 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhxks\" (UniqueName: \"kubernetes.io/projected/a0087618-94aa-4b5f-a590-9e976a84cbbf-kube-api-access-xhxks\") pod \"keystone-operator-controller-manager-6c69d4788d-4q485\" (UID: \"a0087618-94aa-4b5f-a590-9e976a84cbbf\") " pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.072142 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6b7k\" (UniqueName: \"kubernetes.io/projected/53a59934-39b4-4b0b-bf3d-da06f41ccf7f-kube-api-access-t6b7k\") pod \"ironic-operator-controller-manager-6c548fd776-qp8p2\" (UID: \"53a59934-39b4-4b0b-bf3d-da06f41ccf7f\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.072281 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlwbd\" (UniqueName: \"kubernetes.io/projected/f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb-kube-api-access-jlwbd\") pod \"octavia-operator-controller-manager-998648c74-qm568\" (UID: \"f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.072427 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.081281 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.105974 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6b7k\" (UniqueName: \"kubernetes.io/projected/53a59934-39b4-4b0b-bf3d-da06f41ccf7f-kube-api-access-t6b7k\") pod \"ironic-operator-controller-manager-6c548fd776-qp8p2\" (UID: \"53a59934-39b4-4b0b-bf3d-da06f41ccf7f\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.124218 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhxks\" (UniqueName: \"kubernetes.io/projected/a0087618-94aa-4b5f-a590-9e976a84cbbf-kube-api-access-xhxks\") pod \"keystone-operator-controller-manager-6c69d4788d-4q485\" (UID: \"a0087618-94aa-4b5f-a590-9e976a84cbbf\") " pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.125433 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.159556 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z449w\" (UniqueName: \"kubernetes.io/projected/46dcc222-f54d-4ddd-bc12-71fd2cfc989c-kube-api-access-z449w\") pod \"manila-operator-controller-manager-6546668bfd-96kdd\" (UID: \"46dcc222-f54d-4ddd-bc12-71fd2cfc989c\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.165748 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.173946 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlwbd\" (UniqueName: \"kubernetes.io/projected/f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb-kube-api-access-jlwbd\") pod \"octavia-operator-controller-manager-998648c74-qm568\" (UID: \"f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.174189 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dhgk\" (UniqueName: \"kubernetes.io/projected/f4f6e611-da9a-42cb-99f8-59b9784b2671-kube-api-access-6dhgk\") pod \"mariadb-operator-controller-manager-56bbcc9d85-plz68\" (UID: \"f4f6e611-da9a-42cb-99f8-59b9784b2671\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.174223 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpk44\" (UniqueName: \"kubernetes.io/projected/01ca8f91-4e45-4bb2-a44f-a17d6701e529-kube-api-access-fpk44\") pod \"nova-operator-controller-manager-697bc559fc-6mrwx\" (UID: \"01ca8f91-4e45-4bb2-a44f-a17d6701e529\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.174243 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb76x\" (UniqueName: \"kubernetes.io/projected/9429868a-7e85-4c45-a3ff-e05af34c9854-kube-api-access-rb76x\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-lp8wb\" (UID: \"9429868a-7e85-4c45-a3ff-e05af34c9854\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.204122 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.225017 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlwbd\" (UniqueName: \"kubernetes.io/projected/f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb-kube-api-access-jlwbd\") pod \"octavia-operator-controller-manager-998648c74-qm568\" (UID: \"f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.225426 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dhgk\" (UniqueName: \"kubernetes.io/projected/f4f6e611-da9a-42cb-99f8-59b9784b2671-kube-api-access-6dhgk\") pod \"mariadb-operator-controller-manager-56bbcc9d85-plz68\" (UID: \"f4f6e611-da9a-42cb-99f8-59b9784b2671\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.225799 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpk44\" (UniqueName: \"kubernetes.io/projected/01ca8f91-4e45-4bb2-a44f-a17d6701e529-kube-api-access-fpk44\") pod \"nova-operator-controller-manager-697bc559fc-6mrwx\" (UID: \"01ca8f91-4e45-4bb2-a44f-a17d6701e529\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.225909 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-qm568"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.254891 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb76x\" (UniqueName: \"kubernetes.io/projected/9429868a-7e85-4c45-a3ff-e05af34c9854-kube-api-access-rb76x\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-lp8wb\" (UID: \"9429868a-7e85-4c45-a3ff-e05af34c9854\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.299636 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.305424 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.325734 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.330423 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.336999 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.337504 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-5psbx"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.337644 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.352301 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.411909 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.413485 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.422740 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.423840 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.436429 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.437740 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.456922 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-vrf25"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.457185 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-c87kv"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.457608 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.463399 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.510398 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.511484 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh"]
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.512241 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.512677 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.513649 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.513694 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.513722 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhwdt\" (UniqueName: \"kubernetes.io/projected/072818bb-f7b6-4dbc-9885-a3a8c68f9494-kube-api-access-rhwdt\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"
Nov 29 04:27:08 crc kubenswrapper[4631]: E1129 04:27:08.513870 4631 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 29 04:27:08 crc kubenswrapper[4631]: E1129 04:27:08.513908 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert podName:7e78b781-84b7-4915-837a-ed1a45d1201e nodeName:}" failed. No retries permitted until 2025-11-29 04:27:09.513895389 +0000 UTC m=+956.578398903 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert") pod "infra-operator-controller-manager-57548d458d-zhzc9" (UID: "7e78b781-84b7-4915-837a-ed1a45d1201e") : secret "infra-operator-webhook-server-cert" not found
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.518801 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-lq5wf"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.518982 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-mq6fz"
Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.526124 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn"]
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.528577 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-qj65h" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.542269 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.550397 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.588393 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.614860 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.615461 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv8n7\" (UniqueName: \"kubernetes.io/projected/9812178e-08d5-487d-b42e-1edcca79850b-kube-api-access-hv8n7\") pod \"placement-operator-controller-manager-78f8948974-5xmrz\" (UID: \"9812178e-08d5-487d-b42e-1edcca79850b\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.615489 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kktx7\" (UniqueName: \"kubernetes.io/projected/82eecaa9-4289-4d37-b953-7c2de1f5a437-kube-api-access-kktx7\") pod \"ovn-operator-controller-manager-b6456fdb6-j9s5g\" (UID: \"82eecaa9-4289-4d37-b953-7c2de1f5a437\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.615508 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz82b\" (UniqueName: \"kubernetes.io/projected/7956b653-1bf2-4bec-8246-0a806ef0716d-kube-api-access-gz82b\") pod \"swift-operator-controller-manager-5f8c65bbfc-rgxmh\" (UID: \"7956b653-1bf2-4bec-8246-0a806ef0716d\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.615530 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vdxx\" (UniqueName: \"kubernetes.io/projected/d1f8729d-3838-42f0-9185-6b4edb74a90f-kube-api-access-2vdxx\") pod \"telemetry-operator-controller-manager-76cc84c6bb-spqmt\" (UID: \"d1f8729d-3838-42f0-9185-6b4edb74a90f\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.615557 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.615600 4631 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-rhwdt\" (UniqueName: \"kubernetes.io/projected/072818bb-f7b6-4dbc-9885-a3a8c68f9494-kube-api-access-rhwdt\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" Nov 29 04:27:08 crc kubenswrapper[4631]: E1129 04:27:08.615857 4631 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 04:27:08 crc kubenswrapper[4631]: E1129 04:27:08.615890 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert podName:072818bb-f7b6-4dbc-9885-a3a8c68f9494 nodeName:}" failed. No retries permitted until 2025-11-29 04:27:09.115878765 +0000 UTC m=+956.180382279 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" (UID: "072818bb-f7b6-4dbc-9885-a3a8c68f9494") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.630407 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.631564 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.632588 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.647695 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-jrmxh" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.664999 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhwdt\" (UniqueName: \"kubernetes.io/projected/072818bb-f7b6-4dbc-9885-a3a8c68f9494-kube-api-access-rhwdt\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.673140 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.674040 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.676226 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.676441 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.683841 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-9fwp7" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.707433 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.720038 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv8n7\" (UniqueName: \"kubernetes.io/projected/9812178e-08d5-487d-b42e-1edcca79850b-kube-api-access-hv8n7\") pod \"placement-operator-controller-manager-78f8948974-5xmrz\" (UID: \"9812178e-08d5-487d-b42e-1edcca79850b\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.720082 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kktx7\" (UniqueName: \"kubernetes.io/projected/82eecaa9-4289-4d37-b953-7c2de1f5a437-kube-api-access-kktx7\") pod \"ovn-operator-controller-manager-b6456fdb6-j9s5g\" (UID: \"82eecaa9-4289-4d37-b953-7c2de1f5a437\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.720101 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz82b\" (UniqueName: \"kubernetes.io/projected/7956b653-1bf2-4bec-8246-0a806ef0716d-kube-api-access-gz82b\") pod \"swift-operator-controller-manager-5f8c65bbfc-rgxmh\" (UID: \"7956b653-1bf2-4bec-8246-0a806ef0716d\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.720127 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsfgb\" (UniqueName: \"kubernetes.io/projected/ae831fd7-f5a8-4427-a3c7-64ae0a86281f-kube-api-access-wsfgb\") pod \"watcher-operator-controller-manager-769dc69bc-mns7w\" (UID: \"ae831fd7-f5a8-4427-a3c7-64ae0a86281f\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.720145 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vdxx\" (UniqueName: \"kubernetes.io/projected/d1f8729d-3838-42f0-9185-6b4edb74a90f-kube-api-access-2vdxx\") pod \"telemetry-operator-controller-manager-76cc84c6bb-spqmt\" (UID: \"d1f8729d-3838-42f0-9185-6b4edb74a90f\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.720207 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dnjw\" (UniqueName: \"kubernetes.io/projected/fa9bc4b5-9bea-48a5-8d01-1f2cd1957133-kube-api-access-4dnjw\") pod \"test-operator-controller-manager-5854674fcc-7kqbn\" (UID: 
\"fa9bc4b5-9bea-48a5-8d01-1f2cd1957133\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.745491 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kktx7\" (UniqueName: \"kubernetes.io/projected/82eecaa9-4289-4d37-b953-7c2de1f5a437-kube-api-access-kktx7\") pod \"ovn-operator-controller-manager-b6456fdb6-j9s5g\" (UID: \"82eecaa9-4289-4d37-b953-7c2de1f5a437\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.750589 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.751806 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.754947 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-pjmmb" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.768026 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.776276 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.778569 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz82b\" (UniqueName: \"kubernetes.io/projected/7956b653-1bf2-4bec-8246-0a806ef0716d-kube-api-access-gz82b\") pod \"swift-operator-controller-manager-5f8c65bbfc-rgxmh\" (UID: \"7956b653-1bf2-4bec-8246-0a806ef0716d\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.791315 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hv8n7\" (UniqueName: \"kubernetes.io/projected/9812178e-08d5-487d-b42e-1edcca79850b-kube-api-access-hv8n7\") pod \"placement-operator-controller-manager-78f8948974-5xmrz\" (UID: \"9812178e-08d5-487d-b42e-1edcca79850b\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.802563 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vdxx\" (UniqueName: \"kubernetes.io/projected/d1f8729d-3838-42f0-9185-6b4edb74a90f-kube-api-access-2vdxx\") pod \"telemetry-operator-controller-manager-76cc84c6bb-spqmt\" (UID: \"d1f8729d-3838-42f0-9185-6b4edb74a90f\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.805982 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.824143 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.824192 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p267b\" (UniqueName: \"kubernetes.io/projected/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-kube-api-access-p267b\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.824210 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.824258 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dnjw\" (UniqueName: \"kubernetes.io/projected/fa9bc4b5-9bea-48a5-8d01-1f2cd1957133-kube-api-access-4dnjw\") pod \"test-operator-controller-manager-5854674fcc-7kqbn\" (UID: \"fa9bc4b5-9bea-48a5-8d01-1f2cd1957133\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.824342 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsfgb\" (UniqueName: \"kubernetes.io/projected/ae831fd7-f5a8-4427-a3c7-64ae0a86281f-kube-api-access-wsfgb\") pod \"watcher-operator-controller-manager-769dc69bc-mns7w\" (UID: \"ae831fd7-f5a8-4427-a3c7-64ae0a86281f\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.840053 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.843746 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsfgb\" (UniqueName: \"kubernetes.io/projected/ae831fd7-f5a8-4427-a3c7-64ae0a86281f-kube-api-access-wsfgb\") pod \"watcher-operator-controller-manager-769dc69bc-mns7w\" (UID: \"ae831fd7-f5a8-4427-a3c7-64ae0a86281f\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.847127 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.871961 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dnjw\" (UniqueName: \"kubernetes.io/projected/fa9bc4b5-9bea-48a5-8d01-1f2cd1957133-kube-api-access-4dnjw\") pod \"test-operator-controller-manager-5854674fcc-7kqbn\" (UID: \"fa9bc4b5-9bea-48a5-8d01-1f2cd1957133\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.875893 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.890910 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9"] Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.927003 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p267b\" (UniqueName: \"kubernetes.io/projected/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-kube-api-access-p267b\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.927044 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.927152 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrzn8\" (UniqueName: \"kubernetes.io/projected/99a5846d-1348-421d-9637-cbd86e552f1c-kube-api-access-vrzn8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-97pk4\" (UID: \"99a5846d-1348-421d-9637-cbd86e552f1c\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.927186 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:08 crc kubenswrapper[4631]: E1129 04:27:08.927297 4631 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 29 04:27:08 crc kubenswrapper[4631]: E1129 04:27:08.927359 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:09.427345319 +0000 UTC m=+956.491848833 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "metrics-server-cert" not found Nov 29 04:27:08 crc kubenswrapper[4631]: E1129 04:27:08.927647 4631 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 29 04:27:08 crc kubenswrapper[4631]: E1129 04:27:08.928524 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:09.428503588 +0000 UTC m=+956.493007112 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "webhook-server-cert" not found Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.957961 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p267b\" (UniqueName: \"kubernetes.io/projected/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-kube-api-access-p267b\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:08 crc kubenswrapper[4631]: I1129 04:27:08.985516 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.028459 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrzn8\" (UniqueName: \"kubernetes.io/projected/99a5846d-1348-421d-9637-cbd86e552f1c-kube-api-access-vrzn8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-97pk4\" (UID: \"99a5846d-1348-421d-9637-cbd86e552f1c\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.052054 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrzn8\" (UniqueName: \"kubernetes.io/projected/99a5846d-1348-421d-9637-cbd86e552f1c-kube-api-access-vrzn8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-97pk4\" (UID: \"99a5846d-1348-421d-9637-cbd86e552f1c\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.077893 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.131440 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.131650 4631 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.131701 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert podName:072818bb-f7b6-4dbc-9885-a3a8c68f9494 nodeName:}" failed. No retries permitted until 2025-11-29 04:27:10.131687301 +0000 UTC m=+957.196190815 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" (UID: "072818bb-f7b6-4dbc-9885-a3a8c68f9494") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.268205 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd"] Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.435875 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.435918 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.436063 4631 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.436106 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:10.436092801 +0000 UTC m=+957.500596315 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "webhook-server-cert" not found Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.436813 4631 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.436912 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:10.436879231 +0000 UTC m=+957.501382745 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "metrics-server-cert" not found Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.476683 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc"] Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.496031 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd"] Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.496762 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd" event={"ID":"46dcc222-f54d-4ddd-bc12-71fd2cfc989c","Type":"ContainerStarted","Data":"51cc9e0c6d7d39b5a65a07e161d6ce2a2b042e6a8a3cf644308f9117640ef423"} Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.497893 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc" event={"ID":"75e40b24-8291-44fe-bd37-97d493e2c136","Type":"ContainerStarted","Data":"63d2417bac3b167beb6ad51333e9737a5668665cb9325f1b1c70b5b8af77c1b8"} Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.500217 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9" event={"ID":"51f70cd4-a679-426f-9467-1702bb980ada","Type":"ContainerStarted","Data":"310d3ddcd1ac24c1fdfce2ef690397c82eb02bf8fdd0b7d143b2bc06a14b470b"} Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.505892 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"] Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.518365 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2"] Nov 29 04:27:09 crc kubenswrapper[4631]: W1129 04:27:09.520525 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53a59934_39b4_4b0b_bf3d_da06f41ccf7f.slice/crio-18d4fe877146542417740895c3f780323ce340ad4ebf9e38ae06773bc177e702 WatchSource:0}: Error finding container 18d4fe877146542417740895c3f780323ce340ad4ebf9e38ae06773bc177e702: Status 404 returned error can't find the container with id 18d4fe877146542417740895c3f780323ce340ad4ebf9e38ae06773bc177e702 Nov 29 04:27:09 crc 
kubenswrapper[4631]: I1129 04:27:09.538684 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9" Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.538864 4631 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.538934 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert podName:7e78b781-84b7-4915-837a-ed1a45d1201e nodeName:}" failed. No retries permitted until 2025-11-29 04:27:11.538916548 +0000 UTC m=+958.603420062 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert") pod "infra-operator-controller-manager-57548d458d-zhzc9" (UID: "7e78b781-84b7-4915-837a-ed1a45d1201e") : secret "infra-operator-webhook-server-cert" not found Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.721241 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k"] Nov 29 04:27:09 crc kubenswrapper[4631]: W1129 04:27:09.725291 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fc71d02_38a2_4998_8cab_e334a10fcd5c.slice/crio-ab21ca9e763ca4d2975ca3df6d3eed02cfaed8d8c50243092248da542861d965 WatchSource:0}: Error finding container ab21ca9e763ca4d2975ca3df6d3eed02cfaed8d8c50243092248da542861d965: Status 404 returned error can't find the container with id ab21ca9e763ca4d2975ca3df6d3eed02cfaed8d8c50243092248da542861d965 Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.751918 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt"] Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.769204 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68"] Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.777176 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx"] Nov 29 04:27:09 crc kubenswrapper[4631]: W1129 04:27:09.781072 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1f8729d_3838_42f0_9185_6b4edb74a90f.slice/crio-e28bf18357564aeac07c1cc2c040cbfc445cab64a61bda7cacd91b196e69b082 WatchSource:0}: Error finding container e28bf18357564aeac07c1cc2c040cbfc445cab64a61bda7cacd91b196e69b082: Status 404 returned error can't find the container with id e28bf18357564aeac07c1cc2c040cbfc445cab64a61bda7cacd91b196e69b082 Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.782260 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"] Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.786536 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb"] Nov 29 04:27:09 crc 
kubenswrapper[4631]: I1129 04:27:09.913402 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g"] Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.931407 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz"] Nov 29 04:27:09 crc kubenswrapper[4631]: W1129 04:27:09.934245 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9812178e_08d5_487d_b42e_1edcca79850b.slice/crio-9c2b17d81aedc361e0eb2aac2e5e4c9fb8a8413131d981bf62811640085babaa WatchSource:0}: Error finding container 9c2b17d81aedc361e0eb2aac2e5e4c9fb8a8413131d981bf62811640085babaa: Status 404 returned error can't find the container with id 9c2b17d81aedc361e0eb2aac2e5e4c9fb8a8413131d981bf62811640085babaa Nov 29 04:27:09 crc kubenswrapper[4631]: W1129 04:27:09.941507 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf42d6bcb_b085_4dae_acbc_bdc8cd80c5bb.slice/crio-24b08f52590700b1ecbcdca8c8133d7da4f67bd725f2356bd3f135068b3b0654 WatchSource:0}: Error finding container 24b08f52590700b1ecbcdca8c8133d7da4f67bd725f2356bd3f135068b3b0654: Status 404 returned error can't find the container with id 24b08f52590700b1ecbcdca8c8133d7da4f67bd725f2356bd3f135068b3b0654 Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.944270 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-qm568"] Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.967685 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kktx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-j9s5g_openstack-operators(82eecaa9-4289-4d37-b953-7c2de1f5a437): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 04:27:09 crc kubenswrapper[4631]: W1129 04:27:09.969274 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7214fe12_0140_464c_a856_b1b5482bb635.slice/crio-69a979c07826606a5e5ff922ee0f3eaae40683be2a6c453d2570fd5c042f89e8 WatchSource:0}: Error finding container 69a979c07826606a5e5ff922ee0f3eaae40683be2a6c453d2570fd5c042f89e8: Status 404 returned error can't find the container with id 69a979c07826606a5e5ff922ee0f3eaae40683be2a6c453d2570fd5c042f89e8 Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.971917 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kktx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-j9s5g_openstack-operators(82eecaa9-4289-4d37-b953-7c2de1f5a437): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.973658 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" 
pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" podUID="82eecaa9-4289-4d37-b953-7c2de1f5a437" Nov 29 04:27:09 crc kubenswrapper[4631]: E1129 04:27:09.976358 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:440cde33d3a2a0c545cd1c110a3634eb85544370f448865b97a13c38034b0172,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-49s9g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-668d9c48b9-md78v_openstack-operators(7214fe12-0140-464c-a856-b1b5482bb635): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.978097 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v"] Nov 29 04:27:09 crc kubenswrapper[4631]: I1129 04:27:09.983631 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4"] Nov 29 04:27:09 crc kubenswrapper[4631]: W1129 04:27:09.993815 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa9bc4b5_9bea_48a5_8d01_1f2cd1957133.slice/crio-1f3d49f0d2e3b065a6eb347d8b47ea210195c98f406a42c455235fcc05cab83f WatchSource:0}: Error finding container 1f3d49f0d2e3b065a6eb347d8b47ea210195c98f406a42c455235fcc05cab83f: Status 404 returned error can't find the container with id 
1f3d49f0d2e3b065a6eb347d8b47ea210195c98f406a42c455235fcc05cab83f Nov 29 04:27:10 crc kubenswrapper[4631]: W1129 04:27:10.002986 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae831fd7_f5a8_4427_a3c7_64ae0a86281f.slice/crio-ce4d442593549aa339b95a60a1d32ecc29c81a3bd7679f21b6cc6c9c26e42d36 WatchSource:0}: Error finding container ce4d442593549aa339b95a60a1d32ecc29c81a3bd7679f21b6cc6c9c26e42d36: Status 404 returned error can't find the container with id ce4d442593549aa339b95a60a1d32ecc29c81a3bd7679f21b6cc6c9c26e42d36 Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.003866 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn"] Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.009922 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-49s9g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-668d9c48b9-md78v_openstack-operators(7214fe12-0140-464c-a856-b1b5482bb635): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.012406 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v" podUID="7214fe12-0140-464c-a856-b1b5482bb635" Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.016651 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4dnjw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-7kqbn_openstack-operators(fa9bc4b5-9bea-48a5-8d01-1f2cd1957133): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.016614 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gz82b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-rgxmh_openstack-operators(7956b653-1bf2-4bec-8246-0a806ef0716d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.016776 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vrzn8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-97pk4_openstack-operators(99a5846d-1348-421d-9637-cbd86e552f1c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.017985 4631 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" podUID="99a5846d-1348-421d-9637-cbd86e552f1c" Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.019797 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4dnjw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-7kqbn_openstack-operators(fa9bc4b5-9bea-48a5-8d01-1f2cd1957133): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.019805 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gz82b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
swift-operator-controller-manager-5f8c65bbfc-rgxmh_openstack-operators(7956b653-1bf2-4bec-8246-0a806ef0716d): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.021360 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh"]
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.021437 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" podUID="7956b653-1bf2-4bec-8246-0a806ef0716d"
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.021481 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" podUID="fa9bc4b5-9bea-48a5-8d01-1f2cd1957133"
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.026401 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wsfgb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-mns7w_openstack-operators(ae831fd7-f5a8-4427-a3c7-64ae0a86281f): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.029406 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w"]
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.030477 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wsfgb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-mns7w_openstack-operators(ae831fd7-f5a8-4427-a3c7-64ae0a86281f): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.032771 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" podUID="ae831fd7-f5a8-4427-a3c7-64ae0a86281f"
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.151278 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.151704 4631 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.151854 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert podName:072818bb-f7b6-4dbc-9885-a3a8c68f9494 nodeName:}" failed. No retries permitted until 2025-11-29 04:27:12.151838592 +0000 UTC m=+959.216342096 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" (UID: "072818bb-f7b6-4dbc-9885-a3a8c68f9494") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.457738 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.457796 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.457970 4631 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.458018 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:12.458003156 +0000 UTC m=+959.522506670 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "webhook-server-cert" not found
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.458703 4631 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.458751 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:12.458731364 +0000 UTC m=+959.523234888 (durationBeforeRetry 2s).
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "metrics-server-cert" not found
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.508882 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k" event={"ID":"5fc71d02-38a2-4998-8cab-e334a10fcd5c","Type":"ContainerStarted","Data":"ab21ca9e763ca4d2975ca3df6d3eed02cfaed8d8c50243092248da542861d965"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.510125 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg" event={"ID":"1ec069eb-26b3-408c-a4ba-118d01436ecd","Type":"ContainerStarted","Data":"28fa5dd294b23eeb518d142976f8d688e574f018c636f0b01515ed7fa2bfaa42"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.511531 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb" event={"ID":"9429868a-7e85-4c45-a3ff-e05af34c9854","Type":"ContainerStarted","Data":"5bf2a0826e488cbda60c65a1483bd5c0cc59a18aba447f1c44e73b73f81c2578"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.512577 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" event={"ID":"9812178e-08d5-487d-b42e-1edcca79850b","Type":"ContainerStarted","Data":"9c2b17d81aedc361e0eb2aac2e5e4c9fb8a8413131d981bf62811640085babaa"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.517749 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx" event={"ID":"01ca8f91-4e45-4bb2-a44f-a17d6701e529","Type":"ContainerStarted","Data":"cc6cb927ec5519a879bb44d79afe88c79d7f7957ed9194ea594d2df1ee5ab4ba"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.520913 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v" event={"ID":"7214fe12-0140-464c-a856-b1b5482bb635","Type":"ContainerStarted","Data":"69a979c07826606a5e5ff922ee0f3eaae40683be2a6c453d2570fd5c042f89e8"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.522233 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" event={"ID":"ae831fd7-f5a8-4427-a3c7-64ae0a86281f","Type":"ContainerStarted","Data":"ce4d442593549aa339b95a60a1d32ecc29c81a3bd7679f21b6cc6c9c26e42d36"}
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.552500 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" podUID="ae831fd7-f5a8-4427-a3c7-64ae0a86281f"
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.552514 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:440cde33d3a2a0c545cd1c110a3634eb85544370f448865b97a13c38034b0172\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v" podUID="7214fe12-0140-464c-a856-b1b5482bb635"
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.553108 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt" event={"ID":"d1f8729d-3838-42f0-9185-6b4edb74a90f","Type":"ContainerStarted","Data":"e28bf18357564aeac07c1cc2c040cbfc445cab64a61bda7cacd91b196e69b082"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.555667 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd" event={"ID":"fc3a4db2-6980-4bc4-aa20-8340eecc513e","Type":"ContainerStarted","Data":"c9c02cc905c0a1c794d18eb4fcdb93017a20a8352e9583b3f505c9082a560505"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.557075 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" event={"ID":"99a5846d-1348-421d-9637-cbd86e552f1c","Type":"ContainerStarted","Data":"a4a17e7209e088fe585431c960220ae48cdb310f96a7d70340744f0553f077b3"}
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.558619 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" podUID="99a5846d-1348-421d-9637-cbd86e552f1c"
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.560099 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485" event={"ID":"a0087618-94aa-4b5f-a590-9e976a84cbbf","Type":"ContainerStarted","Data":"9b901b62c407a41f33d686eceac438954ed16808626e90d042c8767d0c90f1b4"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.578340 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" event={"ID":"fa9bc4b5-9bea-48a5-8d01-1f2cd1957133","Type":"ContainerStarted","Data":"1f3d49f0d2e3b065a6eb347d8b47ea210195c98f406a42c455235fcc05cab83f"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.601158 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2" event={"ID":"53a59934-39b4-4b0b-bf3d-da06f41ccf7f","Type":"ContainerStarted","Data":"18d4fe877146542417740895c3f780323ce340ad4ebf9e38ae06773bc177e702"}
Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.602708 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" event={"ID":"82eecaa9-4289-4d37-b953-7c2de1f5a437","Type":"ContainerStarted","Data":"63fc0c2ccae4280391aff9f9a01ebbbe8f2c375408be9df41e50c4e17b861e6c"}
Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.611206 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for
\"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" podUID="fa9bc4b5-9bea-48a5-8d01-1f2cd1957133" Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.611492 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" podUID="82eecaa9-4289-4d37-b953-7c2de1f5a437" Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.613293 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68" event={"ID":"f4f6e611-da9a-42cb-99f8-59b9784b2671","Type":"ContainerStarted","Data":"17b9b818391b52bbb485f4c197449a5a67733ccdfb2768d76c0ec26f9dab7b2d"} Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.622894 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568" event={"ID":"f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb","Type":"ContainerStarted","Data":"24b08f52590700b1ecbcdca8c8133d7da4f67bd725f2356bd3f135068b3b0654"} Nov 29 04:27:10 crc kubenswrapper[4631]: I1129 04:27:10.626644 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" event={"ID":"7956b653-1bf2-4bec-8246-0a806ef0716d","Type":"ContainerStarted","Data":"6b3b1805d83c60e263efe491f114457b8b0c988909616630f7a2b16b3d7d4225"} Nov 29 04:27:10 crc kubenswrapper[4631]: E1129 04:27:10.630713 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" podUID="7956b653-1bf2-4bec-8246-0a806ef0716d" Nov 29 04:27:11 crc kubenswrapper[4631]: I1129 04:27:11.580976 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9" Nov 29 04:27:11 crc kubenswrapper[4631]: E1129 04:27:11.581127 4631 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 29 04:27:11 crc kubenswrapper[4631]: E1129 04:27:11.581167 4631 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert podName:7e78b781-84b7-4915-837a-ed1a45d1201e nodeName:}" failed. No retries permitted until 2025-11-29 04:27:15.581156999 +0000 UTC m=+962.645660513 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert") pod "infra-operator-controller-manager-57548d458d-zhzc9" (UID: "7e78b781-84b7-4915-837a-ed1a45d1201e") : secret "infra-operator-webhook-server-cert" not found Nov 29 04:27:11 crc kubenswrapper[4631]: E1129 04:27:11.652708 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" podUID="99a5846d-1348-421d-9637-cbd86e552f1c" Nov 29 04:27:11 crc kubenswrapper[4631]: E1129 04:27:11.654013 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" podUID="ae831fd7-f5a8-4427-a3c7-64ae0a86281f" Nov 29 04:27:11 crc kubenswrapper[4631]: E1129 04:27:11.654090 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" podUID="fa9bc4b5-9bea-48a5-8d01-1f2cd1957133" Nov 29 04:27:11 crc kubenswrapper[4631]: E1129 04:27:11.654135 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:440cde33d3a2a0c545cd1c110a3634eb85544370f448865b97a13c38034b0172\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v" podUID="7214fe12-0140-464c-a856-b1b5482bb635" Nov 29 04:27:11 crc kubenswrapper[4631]: E1129 04:27:11.654181 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" 
podUID="82eecaa9-4289-4d37-b953-7c2de1f5a437" Nov 29 04:27:11 crc kubenswrapper[4631]: E1129 04:27:11.654349 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" podUID="7956b653-1bf2-4bec-8246-0a806ef0716d" Nov 29 04:27:12 crc kubenswrapper[4631]: I1129 04:27:12.198777 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" Nov 29 04:27:12 crc kubenswrapper[4631]: E1129 04:27:12.199181 4631 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 04:27:12 crc kubenswrapper[4631]: E1129 04:27:12.199393 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert podName:072818bb-f7b6-4dbc-9885-a3a8c68f9494 nodeName:}" failed. No retries permitted until 2025-11-29 04:27:16.199371702 +0000 UTC m=+963.263875216 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" (UID: "072818bb-f7b6-4dbc-9885-a3a8c68f9494") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 29 04:27:12 crc kubenswrapper[4631]: I1129 04:27:12.503486 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:12 crc kubenswrapper[4631]: I1129 04:27:12.503535 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:12 crc kubenswrapper[4631]: E1129 04:27:12.503678 4631 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 29 04:27:12 crc kubenswrapper[4631]: E1129 04:27:12.503723 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:16.503711443 +0000 UTC m=+963.568214957 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "webhook-server-cert" not found
Nov 29 04:27:12 crc kubenswrapper[4631]: E1129 04:27:12.504030 4631 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 29 04:27:12 crc kubenswrapper[4631]: E1129 04:27:12.504074 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:16.504065231 +0000 UTC m=+963.568568755 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "metrics-server-cert" not found
Nov 29 04:27:15 crc kubenswrapper[4631]: I1129 04:27:15.673918 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:15 crc kubenswrapper[4631]: E1129 04:27:15.674485 4631 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 29 04:27:15 crc kubenswrapper[4631]: E1129 04:27:15.674529 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert podName:7e78b781-84b7-4915-837a-ed1a45d1201e nodeName:}" failed. No retries permitted until 2025-11-29 04:27:23.674516757 +0000 UTC m=+970.739020271 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert") pod "infra-operator-controller-manager-57548d458d-zhzc9" (UID: "7e78b781-84b7-4915-837a-ed1a45d1201e") : secret "infra-operator-webhook-server-cert" not found
Nov 29 04:27:16 crc kubenswrapper[4631]: I1129 04:27:16.200112 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"
Nov 29 04:27:16 crc kubenswrapper[4631]: E1129 04:27:16.200268 4631 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 29 04:27:16 crc kubenswrapper[4631]: E1129 04:27:16.200355 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert podName:072818bb-f7b6-4dbc-9885-a3a8c68f9494 nodeName:}" failed. No retries permitted until 2025-11-29 04:27:24.200325208 +0000 UTC m=+971.264828722 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" (UID: "072818bb-f7b6-4dbc-9885-a3a8c68f9494") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 29 04:27:16 crc kubenswrapper[4631]: I1129 04:27:16.506263 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:27:16 crc kubenswrapper[4631]: E1129 04:27:16.506842 4631 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 29 04:27:16 crc kubenswrapper[4631]: E1129 04:27:16.506937 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:24.506919172 +0000 UTC m=+971.571422686 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "metrics-server-cert" not found
Nov 29 04:27:16 crc kubenswrapper[4631]: I1129 04:27:16.508839 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:27:16 crc kubenswrapper[4631]: E1129 04:27:16.509263 4631 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 29 04:27:16 crc kubenswrapper[4631]: E1129 04:27:16.509355 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs podName:cb8f6e48-60ac-497b-ab0a-8d556f77a1ce nodeName:}" failed. No retries permitted until 2025-11-29 04:27:24.509312191 +0000 UTC m=+971.573815705 (durationBeforeRetry 8s).
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs") pod "openstack-operator-controller-manager-79cbf6968-9cwcq" (UID: "cb8f6e48-60ac-497b-ab0a-8d556f77a1ce") : secret "webhook-server-cert" not found
Nov 29 04:27:23 crc kubenswrapper[4631]: I1129 04:27:23.757130 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:23 crc kubenswrapper[4631]: I1129 04:27:23.770282 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e78b781-84b7-4915-837a-ed1a45d1201e-cert\") pod \"infra-operator-controller-manager-57548d458d-zhzc9\" (UID: \"7e78b781-84b7-4915-837a-ed1a45d1201e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.071507 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-hvkcd"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.079076 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.266663 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.276530 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/072818bb-f7b6-4dbc-9885-a3a8c68f9494-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf\" (UID: \"072818bb-f7b6-4dbc-9885-a3a8c68f9494\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.570974 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-5psbx"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.571769 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.572372 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.580600 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.581713 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-webhook-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.584188 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cb8f6e48-60ac-497b-ab0a-8d556f77a1ce-metrics-certs\") pod \"openstack-operator-controller-manager-79cbf6968-9cwcq\" (UID: \"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce\") " pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.612066 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-9fwp7"
Nov 29 04:27:24 crc kubenswrapper[4631]: I1129 04:27:24.619584 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:27:28 crc kubenswrapper[4631]: E1129 04:27:28.803708 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5"
Nov 29 04:27:28 crc kubenswrapper[4631]: E1129 04:27:28.804690 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vj4wk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-68c6d99b8f-5jzhg_openstack-operators(1ec069eb-26b3-408c-a4ba-118d01436ecd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:27:31 crc kubenswrapper[4631]: E1129 04:27:31.676565 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f"
Nov 29 04:27:31 crc kubenswrapper[4631]: E1129 04:27:31.676891 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hv8n7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-5xmrz_openstack-operators(9812178e-08d5-487d-b42e-1edcca79850b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:27:33 crc kubenswrapper[4631]: E1129 04:27:33.529393 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7"
Nov 29 04:27:33 crc kubenswrapper[4631]: E1129 04:27:33.529721 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6dhgk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-56bbcc9d85-plz68_openstack-operators(f4f6e611-da9a-42cb-99f8-59b9784b2671): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:27:35 crc kubenswrapper[4631]: E1129 04:27:35.739176 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385"
Nov 29 04:27:35 crc kubenswrapper[4631]: E1129 04:27:35.739358 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2vdxx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-spqmt_openstack-operators(d1f8729d-3838-42f0-9185-6b4edb74a90f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:27:37 crc kubenswrapper[4631]: E1129 04:27:37.265796 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168"
Nov 29 04:27:37 crc kubenswrapper[4631]: E1129 04:27:37.266535 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jlwbd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-qm568_openstack-operators(f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:27:37 crc kubenswrapper[4631]: E1129 04:27:37.818251 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670"
Nov 29 04:27:37 crc kubenswrapper[4631]: E1129 04:27:37.818455 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fpk44,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-6mrwx_openstack-operators(01ca8f91-4e45-4bb2-a44f-a17d6701e529): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:27:46 crc kubenswrapper[4631]: E1129 04:27:46.632310 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.18:5001/openstack-k8s-operators/keystone-operator:6e1c30889aba42df4cb9a8f3da0d8a69c343fdf7"
Nov 29 04:27:46 crc kubenswrapper[4631]: E1129 04:27:46.632959 4631 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.18:5001/openstack-k8s-operators/keystone-operator:6e1c30889aba42df4cb9a8f3da0d8a69c343fdf7"
Nov 29 04:27:46 crc kubenswrapper[4631]: E1129 04:27:46.633111 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.18:5001/openstack-k8s-operators/keystone-operator:6e1c30889aba42df4cb9a8f3da0d8a69c343fdf7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xhxks,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-6c69d4788d-4q485_openstack-operators(a0087618-94aa-4b5f-a590-9e976a84cbbf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:27:47 crc kubenswrapper[4631]: E1129 04:27:47.205387 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2"
Nov 29 04:27:47 crc kubenswrapper[4631]: E1129 04:27:47.205843 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vrzn8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-97pk4_openstack-operators(99a5846d-1348-421d-9637-cbd86e552f1c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:27:47 crc kubenswrapper[4631]: E1129 04:27:47.207550 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" podUID="99a5846d-1348-421d-9637-cbd86e552f1c"
Nov 29 04:27:47 crc kubenswrapper[4631]: I1129 04:27:47.725074 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"]
Nov 29 04:27:47 crc kubenswrapper[4631]: I1129 04:27:47.941123 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"]
Nov 29 04:27:47 crc kubenswrapper[4631]: I1129 04:27:47.959422 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"]
Nov 29 04:27:48 crc kubenswrapper[4631]: I1129 04:27:48.952448 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb" event={"ID":"9429868a-7e85-4c45-a3ff-e05af34c9854","Type":"ContainerStarted","Data":"b75fbaccbd8bee22f2ebba2b860c7b28ab32b9b0d7f386c748509e055a91390b"}
Nov 29 04:27:48 crc kubenswrapper[4631]: I1129 04:27:48.958037 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" event={"ID":"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce","Type":"ContainerStarted","Data":"ac31b6673044f59f6e4f437680c1e18a7e02b52dc1cb2d15c26af00b818dcff8"}
Nov 29 04:27:48 crc kubenswrapper[4631]: I1129 04:27:48.966397 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k" event={"ID":"5fc71d02-38a2-4998-8cab-e334a10fcd5c","Type":"ContainerStarted","Data":"5d443b5d67857c2ff21697c1d1303855b13100aa7674008d1d21691c2bca9618"}
Nov 29 04:27:48 crc kubenswrapper[4631]: I1129 04:27:48.985368 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9" event={"ID":"51f70cd4-a679-426f-9467-1702bb980ada","Type":"ContainerStarted","Data":"83a347bc90403416512fda33142ae34c1616c1670005870d85f0f1495ac82de9"}
Nov 29 04:27:49 crc kubenswrapper[4631]: I1129 04:27:49.000036 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9" event={"ID":"7e78b781-84b7-4915-837a-ed1a45d1201e","Type":"ContainerStarted","Data":"1a8f337067e07bc29ba5ea4c16c23d6641dd77f0e8731f84c4a5fd477cff15fe"}
Nov 29 04:27:49 crc kubenswrapper[4631]: I1129 04:27:49.026844 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2" event={"ID":"53a59934-39b4-4b0b-bf3d-da06f41ccf7f","Type":"ContainerStarted","Data":"b1e393a73620029d538ef57dd2ffff65a02324caebaf987b761c616b787aab76"}
Nov 29 04:27:49 crc kubenswrapper[4631]: I1129 04:27:49.040044 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc" event={"ID":"75e40b24-8291-44fe-bd37-97d493e2c136","Type":"ContainerStarted","Data":"d3295f6fcb3b07fab3d7f65509b66d57ada72e8da13b4f979d7325ff00a82e13"}
Nov 29 04:27:49 crc kubenswrapper[4631]: I1129 04:27:49.075975 4631 kubelet.go:2453] "SyncLoop (PLEG):
event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" event={"ID":"072818bb-f7b6-4dbc-9885-a3a8c68f9494","Type":"ContainerStarted","Data":"268731e0ec508a20127d5189c6b6b99896979cb76ecbad120e4c1db8ec339d81"} Nov 29 04:27:49 crc kubenswrapper[4631]: I1129 04:27:49.098708 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd" event={"ID":"46dcc222-f54d-4ddd-bc12-71fd2cfc989c","Type":"ContainerStarted","Data":"a0ea9396517bba0e5415ee4ff49acd3a141d34bb2da4cb8d587acc15b49da29f"} Nov 29 04:27:49 crc kubenswrapper[4631]: I1129 04:27:49.106709 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd" event={"ID":"fc3a4db2-6980-4bc4-aa20-8340eecc513e","Type":"ContainerStarted","Data":"7625dfd4905778963120dd8f762f756b3cdf86a41f998d348cef5a373e4f4deb"} Nov 29 04:27:50 crc kubenswrapper[4631]: I1129 04:27:50.122934 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" event={"ID":"ae831fd7-f5a8-4427-a3c7-64ae0a86281f","Type":"ContainerStarted","Data":"a837058f6729127db52b3285bb4ea89ac615d6b4b30acf051781f9ab022c40ce"} Nov 29 04:27:50 crc kubenswrapper[4631]: I1129 04:27:50.125481 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" event={"ID":"82eecaa9-4289-4d37-b953-7c2de1f5a437","Type":"ContainerStarted","Data":"15d8c54addc3f548ecabb87805c27d9bcc8428c2b255c7b06f20e55f45554677"} Nov 29 04:27:50 crc kubenswrapper[4631]: I1129 04:27:50.136272 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" event={"ID":"fa9bc4b5-9bea-48a5-8d01-1f2cd1957133","Type":"ContainerStarted","Data":"4ad33641a19733d1c12fa7d95cc3163df33ce9ed0f8ce9e068a888f626ae2017"} Nov 29 04:27:50 crc kubenswrapper[4631]: I1129 04:27:50.716518 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:27:50 crc kubenswrapper[4631]: I1129 04:27:50.716570 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:27:52 crc kubenswrapper[4631]: I1129 04:27:52.149012 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" event={"ID":"7956b653-1bf2-4bec-8246-0a806ef0716d","Type":"ContainerStarted","Data":"80199a52f05bebdb2d0ca8b1fea9679e85399717605bd09aebb59d100fd94032"} Nov 29 04:27:52 crc kubenswrapper[4631]: I1129 04:27:52.153521 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" event={"ID":"cb8f6e48-60ac-497b-ab0a-8d556f77a1ce","Type":"ContainerStarted","Data":"ed63ac1eac955a783b5801b34750d12dd6b9f17b3a29d32ac066e49dd5a1c816"} Nov 29 04:27:52 crc kubenswrapper[4631]: I1129 04:27:52.154867 4631 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" Nov 29 04:27:52 crc kubenswrapper[4631]: I1129 04:27:52.166201 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v" event={"ID":"7214fe12-0140-464c-a856-b1b5482bb635","Type":"ContainerStarted","Data":"32d4b91573524982113f8a8660b2219c8c4eb0c17169e832d5a8541f948eb970"} Nov 29 04:27:52 crc kubenswrapper[4631]: I1129 04:27:52.184641 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq" podStartSLOduration=44.184625774 podStartE2EDuration="44.184625774s" podCreationTimestamp="2025-11-29 04:27:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:27:52.17747132 +0000 UTC m=+999.241974834" watchObservedRunningTime="2025-11-29 04:27:52.184625774 +0000 UTC m=+999.249129288" Nov 29 04:27:54 crc kubenswrapper[4631]: E1129 04:27:54.983646 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt" podUID="d1f8729d-3838-42f0-9185-6b4edb74a90f" Nov 29 04:27:55 crc kubenswrapper[4631]: E1129 04:27:55.016274 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568" podUID="f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb" Nov 29 04:27:55 crc kubenswrapper[4631]: E1129 04:27:55.040464 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" podUID="9812178e-08d5-487d-b42e-1edcca79850b" Nov 29 04:27:55 crc kubenswrapper[4631]: E1129 04:27:55.072689 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx" podUID="01ca8f91-4e45-4bb2-a44f-a17d6701e529" Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.196712 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" event={"ID":"fa9bc4b5-9bea-48a5-8d01-1f2cd1957133","Type":"ContainerStarted","Data":"89c8ba367d444f499c26e79d20304bbab05a58dc4ff920e1665696c55155b913"} Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.197055 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" Nov 29 04:27:55 crc kubenswrapper[4631]: E1129 04:27:55.198997 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68" podUID="f4f6e611-da9a-42cb-99f8-59b9784b2671" Nov 29 04:27:55 
crc kubenswrapper[4631]: I1129 04:27:55.199173 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" event={"ID":"9812178e-08d5-487d-b42e-1edcca79850b","Type":"ContainerStarted","Data":"91e640b12d45e66f70d3740385537ed72ce05188975c3dc1d519af3e054de327"} Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.202123 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" event={"ID":"ae831fd7-f5a8-4427-a3c7-64ae0a86281f","Type":"ContainerStarted","Data":"27ec5e18a8a4d8c744c6d28d8506fdd3d4281f81f2259ed725c436604a59557f"} Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.205167 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.209579 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568" event={"ID":"f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb","Type":"ContainerStarted","Data":"a0acc9da5e94de415ed62eec9dfb1037f39c9c53864a990fc3b48560b125eeff"} Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.210095 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.212883 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.214904 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" event={"ID":"072818bb-f7b6-4dbc-9885-a3a8c68f9494","Type":"ContainerStarted","Data":"4c3fd6f4072624ac93172349fa2b58b7cd28d54904f83e16d0067fb24c763ddb"} Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.228619 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7kqbn" podStartSLOduration=2.624245097 podStartE2EDuration="47.228604826s" podCreationTimestamp="2025-11-29 04:27:08 +0000 UTC" firstStartedPulling="2025-11-29 04:27:10.01656703 +0000 UTC m=+957.081070544" lastFinishedPulling="2025-11-29 04:27:54.620926759 +0000 UTC m=+1001.685430273" observedRunningTime="2025-11-29 04:27:55.226130996 +0000 UTC m=+1002.290634520" watchObservedRunningTime="2025-11-29 04:27:55.228604826 +0000 UTC m=+1002.293108340" Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.232726 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9" event={"ID":"7e78b781-84b7-4915-837a-ed1a45d1201e","Type":"ContainerStarted","Data":"85ffba04c9cf4de4a2d7f230d1d8aba173ce3c08cb40d096dde17a7b93801c05"} Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.232982 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx" event={"ID":"01ca8f91-4e45-4bb2-a44f-a17d6701e529","Type":"ContainerStarted","Data":"cf10f49318c7ae4afb58e211f405c7739eb3f9bf2a318417b27a8c9449329bbc"} Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.235314 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
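The pod_startup_latency_tracker records carry enough data to check their own arithmetic: for the test-operator pod above, podStartSLOduration appears to equal podStartE2EDuration minus the image-pull window (lastFinishedPulling - firstStartedPulling). A worked check of those exact logged values (an observation about the numbers in this log, not a claim about the tracker's implementation):

```go
// Recomputes podStartSLOduration for test-operator-controller-manager-5854674fcc-7kqbn
// from the values logged at 04:27:55.228619.
package main

import (
	"fmt"
	"time"
)

func main() {
	first, _ := time.Parse(time.RFC3339Nano, "2025-11-29T04:27:10.01656703Z")  // firstStartedPulling
	last, _ := time.Parse(time.RFC3339Nano, "2025-11-29T04:27:54.620926759Z") // lastFinishedPulling
	e2e := 47.228604826                                                       // podStartE2EDuration, seconds
	pull := last.Sub(first).Seconds()
	fmt.Printf("e2e - pull = %.9f s\n", e2e-pull) // ≈ 2.624245097, the logged podStartSLOduration
}
```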
pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt" event={"ID":"d1f8729d-3838-42f0-9185-6b4edb74a90f","Type":"ContainerStarted","Data":"b9dd70a857b3345469a0d7162a03c4d4fb6db07df48cdbbad76878fba33f6a28"} Nov 29 04:27:55 crc kubenswrapper[4631]: I1129 04:27:55.327849 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-mns7w" podStartSLOduration=2.6929959009999997 podStartE2EDuration="47.327833219s" podCreationTimestamp="2025-11-29 04:27:08 +0000 UTC" firstStartedPulling="2025-11-29 04:27:10.026304179 +0000 UTC m=+957.090807693" lastFinishedPulling="2025-11-29 04:27:54.661141497 +0000 UTC m=+1001.725645011" observedRunningTime="2025-11-29 04:27:55.324523498 +0000 UTC m=+1002.389027012" watchObservedRunningTime="2025-11-29 04:27:55.327833219 +0000 UTC m=+1002.392336733" Nov 29 04:27:55 crc kubenswrapper[4631]: E1129 04:27:55.584807 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485" podUID="a0087618-94aa-4b5f-a590-9e976a84cbbf" Nov 29 04:27:55 crc kubenswrapper[4631]: E1129 04:27:55.847241 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg" podUID="1ec069eb-26b3-408c-a4ba-118d01436ecd" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.257587 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb" event={"ID":"9429868a-7e85-4c45-a3ff-e05af34c9854","Type":"ContainerStarted","Data":"a4781840ee830ec1c475fb9e6cd746449ef23346bab676bdf4822ed838d97f4a"} Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.258754 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.264102 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.271643 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v" event={"ID":"7214fe12-0140-464c-a856-b1b5482bb635","Type":"ContainerStarted","Data":"47e19cab57ff352b6326e22d03d9e6ef432ab5446b3707e5995c30342201782b"} Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.271980 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.273910 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd" event={"ID":"46dcc222-f54d-4ddd-bc12-71fd2cfc989c","Type":"ContainerStarted","Data":"c030d169ab0bd94df9eeb6fa29acb8d15fa8cac1f9755b9955473db54d76bcf0"} Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.274522 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.274693 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.275783 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485" event={"ID":"a0087618-94aa-4b5f-a590-9e976a84cbbf","Type":"ContainerStarted","Data":"6ceb75467531aec327813b5318afb2c9fc9691af162e5a322a5f3024bd797cf3"} Nov 29 04:27:56 crc kubenswrapper[4631]: E1129 04:27:56.276962 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.18:5001/openstack-k8s-operators/keystone-operator:6e1c30889aba42df4cb9a8f3da0d8a69c343fdf7\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485" podUID="a0087618-94aa-4b5f-a590-9e976a84cbbf" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.277716 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.278392 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k" event={"ID":"5fc71d02-38a2-4998-8cab-e334a10fcd5c","Type":"ContainerStarted","Data":"57aa1a0c329e183ea5fe0b2c527d125c947a269c94f3b35c9725061dc25192e1"} Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.279140 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.283595 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.289660 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg" event={"ID":"1ec069eb-26b3-408c-a4ba-118d01436ecd","Type":"ContainerStarted","Data":"752a75bc7d6074ec4b9c6fe678aae0c9fce7bccba0e7872d0cb6a171885fbf05"} Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.292919 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-lp8wb" podStartSLOduration=4.114726678 podStartE2EDuration="49.292896056s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.789852311 +0000 UTC m=+956.854355825" lastFinishedPulling="2025-11-29 04:27:54.968021689 +0000 UTC m=+1002.032525203" observedRunningTime="2025-11-29 04:27:56.285184108 +0000 UTC m=+1003.349687612" watchObservedRunningTime="2025-11-29 04:27:56.292896056 +0000 UTC m=+1003.357399570" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.297440 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2" event={"ID":"53a59934-39b4-4b0b-bf3d-da06f41ccf7f","Type":"ContainerStarted","Data":"742ecc8ecf50d1929f6a48a59d7c522e6b8e944c4f191abe6f25aa4c2c93e74b"} Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.297670 4631 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.305888 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.308469 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" event={"ID":"82eecaa9-4289-4d37-b953-7c2de1f5a437","Type":"ContainerStarted","Data":"44150e3b52f096c3d393b6a410e8dd503953b327791644b5c1cad3673b6c8c0a"} Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.309267 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.314444 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-96kdd" podStartSLOduration=3.961096201 podStartE2EDuration="49.31443017s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.286679334 +0000 UTC m=+956.351182838" lastFinishedPulling="2025-11-29 04:27:54.640013283 +0000 UTC m=+1001.704516807" observedRunningTime="2025-11-29 04:27:56.313272631 +0000 UTC m=+1003.377776145" watchObservedRunningTime="2025-11-29 04:27:56.31443017 +0000 UTC m=+1003.378933684" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.315098 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.320955 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68" event={"ID":"f4f6e611-da9a-42cb-99f8-59b9784b2671","Type":"ContainerStarted","Data":"b25ce1fafff81a83a90a289cf9ac90313f0af02e757e8f67435b5d205c3997fb"} Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.351574 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-h8x8k" podStartSLOduration=4.118097091 podStartE2EDuration="49.351560823s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.734208159 +0000 UTC m=+956.798711673" lastFinishedPulling="2025-11-29 04:27:54.967671901 +0000 UTC m=+1002.032175405" observedRunningTime="2025-11-29 04:27:56.350366203 +0000 UTC m=+1003.414869717" watchObservedRunningTime="2025-11-29 04:27:56.351560823 +0000 UTC m=+1003.416064337" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.430990 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-md78v" podStartSLOduration=4.506297389 podStartE2EDuration="49.430974714s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.976248893 +0000 UTC m=+957.040752407" lastFinishedPulling="2025-11-29 04:27:54.900926218 +0000 UTC m=+1001.965429732" observedRunningTime="2025-11-29 04:27:56.403541887 +0000 UTC m=+1003.468045401" watchObservedRunningTime="2025-11-29 04:27:56.430974714 +0000 UTC m=+1003.495478228" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.482600 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-qp8p2" podStartSLOduration=4.138634356 podStartE2EDuration="49.482584959s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.522885026 +0000 UTC m=+956.587388530" lastFinishedPulling="2025-11-29 04:27:54.866835619 +0000 UTC m=+1001.931339133" observedRunningTime="2025-11-29 04:27:56.479734629 +0000 UTC m=+1003.544238143" watchObservedRunningTime="2025-11-29 04:27:56.482584959 +0000 UTC m=+1003.547088473" Nov 29 04:27:56 crc kubenswrapper[4631]: I1129 04:27:56.528933 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-j9s5g" podStartSLOduration=3.119956153 podStartE2EDuration="48.528917605s" podCreationTimestamp="2025-11-29 04:27:08 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.967562881 +0000 UTC m=+957.032066395" lastFinishedPulling="2025-11-29 04:27:55.376524333 +0000 UTC m=+1002.441027847" observedRunningTime="2025-11-29 04:27:56.515673853 +0000 UTC m=+1003.580177357" watchObservedRunningTime="2025-11-29 04:27:56.528917605 +0000 UTC m=+1003.593421119" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.363508 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568" event={"ID":"f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb","Type":"ContainerStarted","Data":"eeb99e31b2b5be20fec83321f3499781942e284919611ecdf74d45bb2298b850"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.365362 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.384809 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" event={"ID":"072818bb-f7b6-4dbc-9885-a3a8c68f9494","Type":"ContainerStarted","Data":"c6fcdc9b8e19ffaf2cec7af5fd1e4c6d0072ec916d8ae6cb283f0a009b27b169"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.385666 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.385801 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568" podStartSLOduration=4.651080928 podStartE2EDuration="50.385788762s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.963681046 +0000 UTC m=+957.028184560" lastFinishedPulling="2025-11-29 04:27:55.69838888 +0000 UTC m=+1002.762892394" observedRunningTime="2025-11-29 04:27:57.37951533 +0000 UTC m=+1004.444018844" watchObservedRunningTime="2025-11-29 04:27:57.385788762 +0000 UTC m=+1004.450292276" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.398802 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd" event={"ID":"fc3a4db2-6980-4bc4-aa20-8340eecc513e","Type":"ContainerStarted","Data":"ef928101d01e89c66e8581c719feaa08067fba4820550960148802209af51e83"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.399548 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd" Nov 29 04:27:57 
crc kubenswrapper[4631]: I1129 04:27:57.402178 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" event={"ID":"7956b653-1bf2-4bec-8246-0a806ef0716d","Type":"ContainerStarted","Data":"e9de9c8e17919a66811c78e0c559409485df98dbd8d5a159586fc024b959381f"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.402736 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.403440 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.405558 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.411600 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" event={"ID":"9812178e-08d5-487d-b42e-1edcca79850b","Type":"ContainerStarted","Data":"2ce76c8f701e24dbc0b97cdd52aa8ef13925c066b2c19389dfe58eea8f12969c"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.412260 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.413845 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68" event={"ID":"f4f6e611-da9a-42cb-99f8-59b9784b2671","Type":"ContainerStarted","Data":"be8c7f8e633dd03d796b54f8c5a66a4aa25fce72f4a14c760c856eb26323435c"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.414006 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.422155 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9" event={"ID":"51f70cd4-a679-426f-9467-1702bb980ada","Type":"ContainerStarted","Data":"1f50391add8096d84904c5108530126df6bc63bdc59e739b8c93e3a5e319a0f9"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.423248 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.427661 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.439521 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-kzfsd" podStartSLOduration=4.542914043 podStartE2EDuration="50.439495838s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.498687144 +0000 UTC m=+956.563190658" lastFinishedPulling="2025-11-29 04:27:55.395268939 +0000 UTC m=+1002.459772453" observedRunningTime="2025-11-29 04:27:57.43915776 +0000 UTC m=+1004.503661274" watchObservedRunningTime="2025-11-29 04:27:57.439495838 +0000 UTC m=+1004.503999352" Nov 29 04:27:57 crc 
kubenswrapper[4631]: I1129 04:27:57.440025 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf" podStartSLOduration=43.204138162 podStartE2EDuration="49.440014571s" podCreationTimestamp="2025-11-29 04:27:08 +0000 UTC" firstStartedPulling="2025-11-29 04:27:48.329732545 +0000 UTC m=+995.394236059" lastFinishedPulling="2025-11-29 04:27:54.565608934 +0000 UTC m=+1001.630112468" observedRunningTime="2025-11-29 04:27:57.41652836 +0000 UTC m=+1004.481031884" watchObservedRunningTime="2025-11-29 04:27:57.440014571 +0000 UTC m=+1004.504518085" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.440837 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx" event={"ID":"01ca8f91-4e45-4bb2-a44f-a17d6701e529","Type":"ContainerStarted","Data":"2731ae58044493fa032059559809f3360ba71cecd0b7d95f065241adb6295bc6"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.441461 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.444522 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9" event={"ID":"7e78b781-84b7-4915-837a-ed1a45d1201e","Type":"ContainerStarted","Data":"aeb6ccfdfe5a55ec753529be92507f5a48a184bc44c720c375baf35f2b60b1ba"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.444754 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.490806 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc" event={"ID":"75e40b24-8291-44fe-bd37-97d493e2c136","Type":"ContainerStarted","Data":"cb2dc0449156886a8adc5177f6d55f055a0ce7f64c59cfc5319fbcb505046a15"} Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.491671 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.496502 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.502568 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-rgxmh" podStartSLOduration=4.077495756 podStartE2EDuration="49.502552542s" podCreationTimestamp="2025-11-29 04:27:08 +0000 UTC" firstStartedPulling="2025-11-29 04:27:10.016484378 +0000 UTC m=+957.080987892" lastFinishedPulling="2025-11-29 04:27:55.441541164 +0000 UTC m=+1002.506044678" observedRunningTime="2025-11-29 04:27:57.50165954 +0000 UTC m=+1004.566163054" watchObservedRunningTime="2025-11-29 04:27:57.502552542 +0000 UTC m=+1004.567056056" Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.503645 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt" event={"ID":"d1f8729d-3838-42f0-9185-6b4edb74a90f","Type":"ContainerStarted","Data":"6369f59243d0b2be6ca04d4e163817aeff7e3e80ad7ff7db401a8d919c37d02d"} Nov 29 04:27:57 
Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.503751 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt"
Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.615911 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68" podStartSLOduration=3.401158367 podStartE2EDuration="50.615895588s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.790136158 +0000 UTC m=+956.854639672" lastFinishedPulling="2025-11-29 04:27:57.004873379 +0000 UTC m=+1004.069376893" observedRunningTime="2025-11-29 04:27:57.580955138 +0000 UTC m=+1004.645458652" watchObservedRunningTime="2025-11-29 04:27:57.615895588 +0000 UTC m=+1004.680399092"
Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.616755 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz" podStartSLOduration=3.777176876 podStartE2EDuration="49.616750029s" podCreationTimestamp="2025-11-29 04:27:08 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.966906685 +0000 UTC m=+957.031410199" lastFinishedPulling="2025-11-29 04:27:55.806479848 +0000 UTC m=+1002.870983352" observedRunningTime="2025-11-29 04:27:57.614720659 +0000 UTC m=+1004.679224173" watchObservedRunningTime="2025-11-29 04:27:57.616750029 +0000 UTC m=+1004.681253543"
Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.646125 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-dp4b9" podStartSLOduration=4.114942909 podStartE2EDuration="50.646110443s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:08.946474158 +0000 UTC m=+956.010977672" lastFinishedPulling="2025-11-29 04:27:55.477641692 +0000 UTC m=+1002.542145206" observedRunningTime="2025-11-29 04:27:57.643255743 +0000 UTC m=+1004.707759287" watchObservedRunningTime="2025-11-29 04:27:57.646110443 +0000 UTC m=+1004.710613957"
Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.672463 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9" podStartSLOduration=44.432980267 podStartE2EDuration="50.672447083s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:48.324051257 +0000 UTC m=+995.388554771" lastFinishedPulling="2025-11-29 04:27:54.563518063 +0000 UTC m=+1001.628021587" observedRunningTime="2025-11-29 04:27:57.667198496 +0000 UTC m=+1004.731702010" watchObservedRunningTime="2025-11-29 04:27:57.672447083 +0000 UTC m=+1004.736950597"
Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.710725 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-q4tvc" podStartSLOduration=4.774036031 podStartE2EDuration="50.710692063s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.486562147 +0000 UTC m=+956.551065661" lastFinishedPulling="2025-11-29 04:27:55.423218179 +0000 UTC m=+1002.487721693" observedRunningTime="2025-11-29 04:27:57.69330198 +0000 UTC m=+1004.757805504" watchObservedRunningTime="2025-11-29 04:27:57.710692063 +0000 UTC m=+1004.775195577"
Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.756026 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt" podStartSLOduration=3.644794878 podStartE2EDuration="49.756007645s" podCreationTimestamp="2025-11-29 04:27:08 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.791963192 +0000 UTC m=+956.856466706" lastFinishedPulling="2025-11-29 04:27:55.903175959 +0000 UTC m=+1002.967679473" observedRunningTime="2025-11-29 04:27:57.753770311 +0000 UTC m=+1004.818273835" watchObservedRunningTime="2025-11-29 04:27:57.756007645 +0000 UTC m=+1004.820511159"
Nov 29 04:27:57 crc kubenswrapper[4631]: I1129 04:27:57.815990 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx" podStartSLOduration=4.554334978 podStartE2EDuration="50.815975173s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.78983552 +0000 UTC m=+956.854339034" lastFinishedPulling="2025-11-29 04:27:56.051475715 +0000 UTC m=+1003.115979229" observedRunningTime="2025-11-29 04:27:57.811621218 +0000 UTC m=+1004.876124732" watchObservedRunningTime="2025-11-29 04:27:57.815975173 +0000 UTC m=+1004.880478687"
Nov 29 04:27:58 crc kubenswrapper[4631]: I1129 04:27:58.512677 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg" event={"ID":"1ec069eb-26b3-408c-a4ba-118d01436ecd","Type":"ContainerStarted","Data":"90c1bf1497c7b666f2a5628b361433235a0bf7de5318ed6cf0ae91dcb1fda080"}
Nov 29 04:27:58 crc kubenswrapper[4631]: I1129 04:27:58.514039 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"
Nov 29 04:27:58 crc kubenswrapper[4631]: I1129 04:27:58.516070 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485" event={"ID":"a0087618-94aa-4b5f-a590-9e976a84cbbf","Type":"ContainerStarted","Data":"ea1b202d5e80fbc4afe89521ec42a92467c31be6da109a8c25c822629448c0b4"}
Nov 29 04:27:58 crc kubenswrapper[4631]: I1129 04:27:58.516396 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"
Nov 29 04:27:58 crc kubenswrapper[4631]: I1129 04:27:58.541847 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg" podStartSLOduration=4.294729615 podStartE2EDuration="51.541834124s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.790380794 +0000 UTC m=+956.854884308" lastFinishedPulling="2025-11-29 04:27:57.037485303 +0000 UTC m=+1004.101988817" observedRunningTime="2025-11-29 04:27:58.537272444 +0000 UTC m=+1005.601775958" watchObservedRunningTime="2025-11-29 04:27:58.541834124 +0000 UTC m=+1005.606337638"
Nov 29 04:27:58 crc kubenswrapper[4631]: I1129 04:27:58.564774 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485" podStartSLOduration=3.404267634 podStartE2EDuration="51.564757952s" podCreationTimestamp="2025-11-29 04:27:07 +0000 UTC" firstStartedPulling="2025-11-29 04:27:09.49282438 +0000 UTC m=+956.557327894" lastFinishedPulling="2025-11-29 04:27:57.653314698 +0000 UTC m=+1004.717818212" observedRunningTime="2025-11-29 04:27:58.558458629 +0000 UTC m=+1005.622962143" watchObservedRunningTime="2025-11-29 04:27:58.564757952 +0000 UTC m=+1005.629261466"
Nov 29 04:27:59 crc kubenswrapper[4631]: I1129 04:27:59.535587 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf"
Nov 29 04:28:00 crc kubenswrapper[4631]: E1129 04:28:00.218298 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" podUID="99a5846d-1348-421d-9637-cbd86e552f1c"
Nov 29 04:28:04 crc kubenswrapper[4631]: I1129 04:28:04.087285 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-zhzc9"
Nov 29 04:28:04 crc kubenswrapper[4631]: I1129 04:28:04.633050 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-79cbf6968-9cwcq"
Nov 29 04:28:08 crc kubenswrapper[4631]: I1129 04:28:08.085829 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-5jzhg"
Nov 29 04:28:08 crc kubenswrapper[4631]: I1129 04:28:08.207691 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-6c69d4788d-4q485"
Nov 29 04:28:08 crc kubenswrapper[4631]: I1129 04:28:08.340946 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-6mrwx"
Nov 29 04:28:08 crc kubenswrapper[4631]: I1129 04:28:08.356463 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-plz68"
Nov 29 04:28:08 crc kubenswrapper[4631]: I1129 04:28:08.415750 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-qm568"
Nov 29 04:28:08 crc kubenswrapper[4631]: I1129 04:28:08.809588 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-5xmrz"
Nov 29 04:28:08 crc kubenswrapper[4631]: I1129 04:28:08.888168 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-spqmt"
Nov 29 04:28:13 crc kubenswrapper[4631]: I1129 04:28:13.227774 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 29 04:28:14 crc kubenswrapper[4631]: I1129 04:28:14.678104 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" event={"ID":"99a5846d-1348-421d-9637-cbd86e552f1c","Type":"ContainerStarted","Data":"bcef51c98f339b9006c7032cb2827739f36e378897a27444c505431dd473ba1c"}
Nov 29 04:28:14 crc kubenswrapper[4631]: I1129 04:28:14.695950 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-97pk4" podStartSLOduration=2.806475589 podStartE2EDuration="1m6.695928787s" podCreationTimestamp="2025-11-29 04:27:08 +0000 UTC" firstStartedPulling="2025-11-29 04:27:10.016663653 +0000 UTC m=+957.081167167" lastFinishedPulling="2025-11-29 04:28:13.906116841 +0000 UTC m=+1020.970620365" observedRunningTime="2025-11-29 04:28:14.694186975 +0000 UTC m=+1021.758690499" watchObservedRunningTime="2025-11-29 04:28:14.695928787 +0000 UTC m=+1021.760432301"
Nov 29 04:28:20 crc kubenswrapper[4631]: I1129 04:28:20.716374 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 04:28:20 crc kubenswrapper[4631]: I1129 04:28:20.718410 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.374131 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-httvc"]
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.375625 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-httvc"
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.381054 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.381131 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.381258 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-gntsc"
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.381360 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.429757 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-httvc"]
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.469506 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c8005bc-207c-4fbc-90d2-01a2ee002123-config\") pod \"dnsmasq-dns-675f4bcbfc-httvc\" (UID: \"0c8005bc-207c-4fbc-90d2-01a2ee002123\") " pod="openstack/dnsmasq-dns-675f4bcbfc-httvc"
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.469553 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v96qp\" (UniqueName: \"kubernetes.io/projected/0c8005bc-207c-4fbc-90d2-01a2ee002123-kube-api-access-v96qp\") pod \"dnsmasq-dns-675f4bcbfc-httvc\" (UID: \"0c8005bc-207c-4fbc-90d2-01a2ee002123\") " pod="openstack/dnsmasq-dns-675f4bcbfc-httvc"
Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.473656 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-95trz"]
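The recurring machine-config-daemon liveness failure above is a plain HTTP GET against 127.0.0.1:8798/health whose TCP dial is refused, meaning nothing is listening on that port at probe time (as opposed to the endpoint answering with an error status). A reduced sketch of what the prober is doing; the 1-second timeout mirrors the TimeoutSeconds:1 visible in the container spec earlier in this log:

```go
// Illustrative only: reproduces the probe's failure mode from the log,
// "dial tcp 127.0.0.1:8798: connect: connection refused".
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 1 * time.Second} // matches TimeoutSeconds:1
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		fmt.Println("probe failure:", err) // connection refused if no listener
		return
	}
	defer resp.Body.Close()
	fmt.Println("probe status:", resp.Status)
}
```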
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.479374 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.494072 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-95trz"] Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.570495 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-95trz\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.570544 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msxb8\" (UniqueName: \"kubernetes.io/projected/d802b479-6972-4f7e-825e-c07ca6b5a5fc-kube-api-access-msxb8\") pod \"dnsmasq-dns-78dd6ddcc-95trz\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.570584 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-config\") pod \"dnsmasq-dns-78dd6ddcc-95trz\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.570659 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c8005bc-207c-4fbc-90d2-01a2ee002123-config\") pod \"dnsmasq-dns-675f4bcbfc-httvc\" (UID: \"0c8005bc-207c-4fbc-90d2-01a2ee002123\") " pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.570764 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v96qp\" (UniqueName: \"kubernetes.io/projected/0c8005bc-207c-4fbc-90d2-01a2ee002123-kube-api-access-v96qp\") pod \"dnsmasq-dns-675f4bcbfc-httvc\" (UID: \"0c8005bc-207c-4fbc-90d2-01a2ee002123\") " pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.571483 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c8005bc-207c-4fbc-90d2-01a2ee002123-config\") pod \"dnsmasq-dns-675f4bcbfc-httvc\" (UID: \"0c8005bc-207c-4fbc-90d2-01a2ee002123\") " pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.591068 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v96qp\" (UniqueName: \"kubernetes.io/projected/0c8005bc-207c-4fbc-90d2-01a2ee002123-kube-api-access-v96qp\") pod \"dnsmasq-dns-675f4bcbfc-httvc\" (UID: \"0c8005bc-207c-4fbc-90d2-01a2ee002123\") " pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.671703 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-95trz\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 
04:28:27.671742 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msxb8\" (UniqueName: \"kubernetes.io/projected/d802b479-6972-4f7e-825e-c07ca6b5a5fc-kube-api-access-msxb8\") pod \"dnsmasq-dns-78dd6ddcc-95trz\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.671786 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-config\") pod \"dnsmasq-dns-78dd6ddcc-95trz\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.672578 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-95trz\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.672590 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-config\") pod \"dnsmasq-dns-78dd6ddcc-95trz\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.686906 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msxb8\" (UniqueName: \"kubernetes.io/projected/d802b479-6972-4f7e-825e-c07ca6b5a5fc-kube-api-access-msxb8\") pod \"dnsmasq-dns-78dd6ddcc-95trz\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.691251 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" Nov 29 04:28:27 crc kubenswrapper[4631]: I1129 04:28:27.797717 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:28 crc kubenswrapper[4631]: I1129 04:28:28.105594 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-95trz"] Nov 29 04:28:28 crc kubenswrapper[4631]: I1129 04:28:28.121353 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-httvc"] Nov 29 04:28:28 crc kubenswrapper[4631]: I1129 04:28:28.787740 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" event={"ID":"d802b479-6972-4f7e-825e-c07ca6b5a5fc","Type":"ContainerStarted","Data":"dc158dce9ad568a4bb8b73fe5739c0ceea6d4a4460e935182e63cc3ea0b90b75"} Nov 29 04:28:28 crc kubenswrapper[4631]: I1129 04:28:28.789391 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" event={"ID":"0c8005bc-207c-4fbc-90d2-01a2ee002123","Type":"ContainerStarted","Data":"71a8f262930967f27f8a6066a419d02d44146a65910560ae30dd14531e8f9d09"} Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.494142 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-httvc"] Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.542054 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l9xj8"] Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.543054 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.567171 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l9xj8"] Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.629026 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-dns-svc\") pod \"dnsmasq-dns-666b6646f7-l9xj8\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.629091 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzzsh\" (UniqueName: \"kubernetes.io/projected/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-kube-api-access-bzzsh\") pod \"dnsmasq-dns-666b6646f7-l9xj8\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.629112 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-config\") pod \"dnsmasq-dns-666b6646f7-l9xj8\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.729957 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-dns-svc\") pod \"dnsmasq-dns-666b6646f7-l9xj8\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.730031 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzzsh\" (UniqueName: \"kubernetes.io/projected/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-kube-api-access-bzzsh\") 
pod \"dnsmasq-dns-666b6646f7-l9xj8\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.730052 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-config\") pod \"dnsmasq-dns-666b6646f7-l9xj8\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.730877 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-config\") pod \"dnsmasq-dns-666b6646f7-l9xj8\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.732347 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-dns-svc\") pod \"dnsmasq-dns-666b6646f7-l9xj8\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.757122 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzzsh\" (UniqueName: \"kubernetes.io/projected/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-kube-api-access-bzzsh\") pod \"dnsmasq-dns-666b6646f7-l9xj8\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:30 crc kubenswrapper[4631]: I1129 04:28:30.858636 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.001414 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-95trz"] Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.046940 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-khw5g"] Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.048292 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.081724 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-khw5g"] Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.162123 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hfc8\" (UniqueName: \"kubernetes.io/projected/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-kube-api-access-6hfc8\") pod \"dnsmasq-dns-57d769cc4f-khw5g\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.162586 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-config\") pod \"dnsmasq-dns-57d769cc4f-khw5g\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.162609 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-khw5g\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.265319 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-config\") pod \"dnsmasq-dns-57d769cc4f-khw5g\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.265367 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-khw5g\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.265457 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hfc8\" (UniqueName: \"kubernetes.io/projected/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-kube-api-access-6hfc8\") pod \"dnsmasq-dns-57d769cc4f-khw5g\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.266182 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-config\") pod \"dnsmasq-dns-57d769cc4f-khw5g\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.266314 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-khw5g\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.289222 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hfc8\" (UniqueName: 
\"kubernetes.io/projected/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-kube-api-access-6hfc8\") pod \"dnsmasq-dns-57d769cc4f-khw5g\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.390150 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.514857 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l9xj8"] Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.815958 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.817615 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.822861 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.823005 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-tfvdm" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.823072 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.825396 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.825599 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.825822 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.828229 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.849081 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-khw5g"] Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.861433 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.880395 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881402 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881438 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" 
Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881502 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881532 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-config-data\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881554 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881589 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881610 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881625 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881649 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jnjb\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-kube-api-access-5jnjb\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.881669 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.893449 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" event={"ID":"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab","Type":"ContainerStarted","Data":"f10e19b907a1c08c17a3e19fcaf1c461fc1016922448c65726a4bc0ef99acd96"} Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.982783 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jnjb\" 
(UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-kube-api-access-5jnjb\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.982836 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.982862 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.982898 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.982917 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.982940 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.982973 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-config-data\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.982996 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.983035 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.983102 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 
04:28:31.983120 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.983762 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.983971 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.984208 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.984472 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.984541 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.985401 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-config-data\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:31 crc kubenswrapper[4631]: I1129 04:28:31.991645 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.008259 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.021254 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " 
pod="openstack/rabbitmq-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.022586 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jnjb\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-kube-api-access-5jnjb\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.022931 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.052323 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " pod="openstack/rabbitmq-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.156782 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.202754 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.203959 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.206490 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.206845 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.207257 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.207466 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.208042 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-7qszg" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.208198 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.209396 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.222557 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287267 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287667 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287707 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287729 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287751 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287814 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287852 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287879 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287935 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287961 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.287990 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-2dgpk\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-kube-api-access-2dgpk\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.391746 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.391822 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.391849 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.391908 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.391958 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.391983 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dgpk\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-kube-api-access-2dgpk\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.392003 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.392019 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.392112 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.392131 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.392167 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.393575 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.394787 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.395094 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.395482 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.396957 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.398087 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.416693 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dgpk\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-kube-api-access-2dgpk\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.430202 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.430581 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.434179 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.435664 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.456197 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.525413 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.688953 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 04:28:32 crc kubenswrapper[4631]: I1129 04:28:32.913556 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" event={"ID":"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c","Type":"ContainerStarted","Data":"075ff730e48c703a10615102d42d41b16ff377c32cf7b5484331df4340b06175"} Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.552440 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.553790 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.556259 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.557189 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.557252 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-4gnwc" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.559625 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.571315 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.572028 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.720451 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2197f066-a879-4131-9e49-4d188a01db93-config-data-default\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.720485 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmghx\" (UniqueName: \"kubernetes.io/projected/2197f066-a879-4131-9e49-4d188a01db93-kube-api-access-qmghx\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.720554 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2197f066-a879-4131-9e49-4d188a01db93-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.720576 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2197f066-a879-4131-9e49-4d188a01db93-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.720592 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2197f066-a879-4131-9e49-4d188a01db93-kolla-config\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.720605 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2197f066-a879-4131-9e49-4d188a01db93-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.720621 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2197f066-a879-4131-9e49-4d188a01db93-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.720645 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.821574 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2197f066-a879-4131-9e49-4d188a01db93-config-data-default\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.821607 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmghx\" (UniqueName: \"kubernetes.io/projected/2197f066-a879-4131-9e49-4d188a01db93-kube-api-access-qmghx\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.821677 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2197f066-a879-4131-9e49-4d188a01db93-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.821695 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2197f066-a879-4131-9e49-4d188a01db93-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.821710 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2197f066-a879-4131-9e49-4d188a01db93-kolla-config\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.821726 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2197f066-a879-4131-9e49-4d188a01db93-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.821743 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2197f066-a879-4131-9e49-4d188a01db93-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.821766 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" 
(UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.822034 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.828478 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2197f066-a879-4131-9e49-4d188a01db93-config-data-default\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.829161 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2197f066-a879-4131-9e49-4d188a01db93-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.833030 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2197f066-a879-4131-9e49-4d188a01db93-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.833465 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2197f066-a879-4131-9e49-4d188a01db93-kolla-config\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.843192 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2197f066-a879-4131-9e49-4d188a01db93-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.855131 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2197f066-a879-4131-9e49-4d188a01db93-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.859902 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmghx\" (UniqueName: \"kubernetes.io/projected/2197f066-a879-4131-9e49-4d188a01db93-kube-api-access-qmghx\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.860221 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"2197f066-a879-4131-9e49-4d188a01db93\") " pod="openstack/openstack-galera-0" Nov 29 04:28:33 crc kubenswrapper[4631]: I1129 04:28:33.923791 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 29 04:28:34 crc kubenswrapper[4631]: I1129 04:28:34.926424 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 29 04:28:34 crc kubenswrapper[4631]: I1129 04:28:34.927830 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:34 crc kubenswrapper[4631]: I1129 04:28:34.929354 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-dr76x" Nov 29 04:28:34 crc kubenswrapper[4631]: I1129 04:28:34.929746 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 29 04:28:34 crc kubenswrapper[4631]: I1129 04:28:34.944139 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 29 04:28:34 crc kubenswrapper[4631]: I1129 04:28:34.944458 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 29 04:28:34 crc kubenswrapper[4631]: I1129 04:28:34.953657 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.046050 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.046089 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.046121 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.046194 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rhhg\" (UniqueName: \"kubernetes.io/projected/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-kube-api-access-9rhhg\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.046218 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.046236 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.046260 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.046284 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.089846 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.102512 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.110636 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-f62qx" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.111142 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.112882 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.119865 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.169177 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rhhg\" (UniqueName: \"kubernetes.io/projected/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-kube-api-access-9rhhg\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.169273 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.169309 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.169370 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 
04:28:35.169443 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.169467 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.169487 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.169554 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.170177 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.171079 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.171819 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.173537 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.175293 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.190764 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.209228 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rhhg\" (UniqueName: \"kubernetes.io/projected/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-kube-api-access-9rhhg\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.220910 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9989c8e-3a12-49c9-89e0-d13778a4c3d4-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.250014 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c9989c8e-3a12-49c9-89e0-d13778a4c3d4\") " pod="openstack/openstack-cell1-galera-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.271395 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cqrj\" (UniqueName: \"kubernetes.io/projected/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-kube-api-access-2cqrj\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.271471 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-kolla-config\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.271536 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.271561 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.271577 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-config-data\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.374098 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.374160 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.374177 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-config-data\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.374218 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cqrj\" (UniqueName: \"kubernetes.io/projected/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-kube-api-access-2cqrj\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.374270 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-kolla-config\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.375003 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-kolla-config\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.375650 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-config-data\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.379136 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.379154 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.389572 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cqrj\" (UniqueName: \"kubernetes.io/projected/a706cd5e-48d4-44a0-b8f5-b97ac5e39a16-kube-api-access-2cqrj\") pod \"memcached-0\" (UID: \"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16\") " pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.433790 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 29 04:28:35 crc kubenswrapper[4631]: I1129 04:28:35.554279 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 29 04:28:37 crc kubenswrapper[4631]: I1129 04:28:37.465754 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 04:28:37 crc kubenswrapper[4631]: I1129 04:28:37.468913 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 04:28:37 crc kubenswrapper[4631]: I1129 04:28:37.474258 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-kvk46" Nov 29 04:28:37 crc kubenswrapper[4631]: I1129 04:28:37.483376 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 04:28:37 crc kubenswrapper[4631]: I1129 04:28:37.519778 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mcw6\" (UniqueName: \"kubernetes.io/projected/03cdf7d8-fc05-44d0-a4a9-b62239838053-kube-api-access-9mcw6\") pod \"kube-state-metrics-0\" (UID: \"03cdf7d8-fc05-44d0-a4a9-b62239838053\") " pod="openstack/kube-state-metrics-0" Nov 29 04:28:37 crc kubenswrapper[4631]: I1129 04:28:37.620869 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mcw6\" (UniqueName: \"kubernetes.io/projected/03cdf7d8-fc05-44d0-a4a9-b62239838053-kube-api-access-9mcw6\") pod \"kube-state-metrics-0\" (UID: \"03cdf7d8-fc05-44d0-a4a9-b62239838053\") " pod="openstack/kube-state-metrics-0" Nov 29 04:28:37 crc kubenswrapper[4631]: I1129 04:28:37.640222 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mcw6\" (UniqueName: \"kubernetes.io/projected/03cdf7d8-fc05-44d0-a4a9-b62239838053-kube-api-access-9mcw6\") pod \"kube-state-metrics-0\" (UID: \"03cdf7d8-fc05-44d0-a4a9-b62239838053\") " pod="openstack/kube-state-metrics-0" Nov 29 04:28:37 crc kubenswrapper[4631]: I1129 04:28:37.843197 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 04:28:37 crc kubenswrapper[4631]: I1129 04:28:37.966254 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fdd7deaa-61f9-48f4-96c2-6d10d8df4192","Type":"ContainerStarted","Data":"3d3ed012a1a820aa2db6ffc3a0ac8b21f468449854fcdada90ed8cf97c7b028c"} Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.618231 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-fc5cp"] Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.619104 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.623148 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-jxtng" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.623168 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.623357 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.629066 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fc5cp"] Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.650255 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-kl2kj"] Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.652425 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-kl2kj" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.680714 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-kl2kj"] Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774043 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-scripts\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774094 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9dac72cc-94dd-4863-92c6-99296142fafb-scripts\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774153 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9dac72cc-94dd-4863-92c6-99296142fafb-var-run-ovn\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774179 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-var-log\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774212 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt4p9\" (UniqueName: \"kubernetes.io/projected/9dac72cc-94dd-4863-92c6-99296142fafb-kube-api-access-nt4p9\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774235 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9dac72cc-94dd-4863-92c6-99296142fafb-var-log-ovn\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " 
pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774274 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-var-lib\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774344 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9dac72cc-94dd-4863-92c6-99296142fafb-var-run\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774366 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-var-run\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774390 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dac72cc-94dd-4863-92c6-99296142fafb-ovn-controller-tls-certs\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774425 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqc4d\" (UniqueName: \"kubernetes.io/projected/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-kube-api-access-fqc4d\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774450 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-etc-ovs\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.774485 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dac72cc-94dd-4863-92c6-99296142fafb-combined-ca-bundle\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.875455 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9dac72cc-94dd-4863-92c6-99296142fafb-scripts\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.875763 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9dac72cc-94dd-4863-92c6-99296142fafb-var-run-ovn\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp" Nov 29 04:28:40 crc 
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.875864 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-var-log\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.875943 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt4p9\" (UniqueName: \"kubernetes.io/projected/9dac72cc-94dd-4863-92c6-99296142fafb-kube-api-access-nt4p9\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876020 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9dac72cc-94dd-4863-92c6-99296142fafb-var-log-ovn\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876112 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-var-lib\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876215 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9dac72cc-94dd-4863-92c6-99296142fafb-var-run\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876273 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9dac72cc-94dd-4863-92c6-99296142fafb-var-run-ovn\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876287 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-var-run\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876377 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dac72cc-94dd-4863-92c6-99296142fafb-ovn-controller-tls-certs\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876386 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9dac72cc-94dd-4863-92c6-99296142fafb-var-log-ovn\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876469 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqc4d\" (UniqueName: \"kubernetes.io/projected/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-kube-api-access-fqc4d\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876501 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-etc-ovs\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876550 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dac72cc-94dd-4863-92c6-99296142fafb-combined-ca-bundle\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876597 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-scripts\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.876294 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-var-log\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.877171 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-etc-ovs\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.877222 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-var-lib\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.877390 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-var-run\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.877479 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9dac72cc-94dd-4863-92c6-99296142fafb-var-run\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.879433 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-scripts\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.880969 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9dac72cc-94dd-4863-92c6-99296142fafb-scripts\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.881602 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/9dac72cc-94dd-4863-92c6-99296142fafb-ovn-controller-tls-certs\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.895907 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9dac72cc-94dd-4863-92c6-99296142fafb-combined-ca-bundle\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.901065 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqc4d\" (UniqueName: \"kubernetes.io/projected/1334e52e-4dbd-4c2d-bd05-d19f59ef722b-kube-api-access-fqc4d\") pod \"ovn-controller-ovs-kl2kj\" (UID: \"1334e52e-4dbd-4c2d-bd05-d19f59ef722b\") " pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.914943 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt4p9\" (UniqueName: \"kubernetes.io/projected/9dac72cc-94dd-4863-92c6-99296142fafb-kube-api-access-nt4p9\") pod \"ovn-controller-fc5cp\" (UID: \"9dac72cc-94dd-4863-92c6-99296142fafb\") " pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.934785 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fc5cp"
Nov 29 04:28:40 crc kubenswrapper[4631]: I1129 04:28:40.972037 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.059485 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.060933 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.064139 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-rttrc" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.064592 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.065085 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.065136 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.065621 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.088269 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.226376 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.226457 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.226497 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.226530 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-config\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.226610 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.226644 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.226823 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s527k\" 
(UniqueName: \"kubernetes.io/projected/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-kube-api-access-s527k\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.226906 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.259900 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.262107 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.264767 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-mc7x2" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.265071 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.265566 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.267386 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.280212 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.328570 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.328624 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.328652 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.328673 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.328689 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-config\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0" 
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.328707 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.328755 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s527k\" (UniqueName: \"kubernetes.io/projected/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-kube-api-access-s527k\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.328798 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.329217 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.330237 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-config\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.330948 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.331088 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.337504 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.338255 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.349012 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.355685 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.356723 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s527k\" (UniqueName: \"kubernetes.io/projected/5bd78ee0-c12e-4d6b-a47d-3652c3150c8d-kube-api-access-s527k\") pod \"ovsdbserver-nb-0\" (UID: \"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d\") " pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.394166 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.430066 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.430114 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.430157 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.430179 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.430394 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.430508 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-config\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.430528 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67wfh\" (UniqueName: \"kubernetes.io/projected/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-kube-api-access-67wfh\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.430642 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532298 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532354 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532392 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532428 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-config\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532446 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67wfh\" (UniqueName: \"kubernetes.io/projected/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-kube-api-access-67wfh\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532487 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532517 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532533 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532622 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.532917 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.533693 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.536657 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-config\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.537860 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.539029 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.540444 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.552999 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.573488 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67wfh\" (UniqueName: \"kubernetes.io/projected/c80339e5-63b2-451d-a7fb-25ef7a2fba6a-kube-api-access-67wfh\") pod \"ovsdbserver-sb-0\" (UID: \"c80339e5-63b2-451d-a7fb-25ef7a2fba6a\") " pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:44 crc kubenswrapper[4631]: I1129 04:28:44.579104 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 29 04:28:50 crc kubenswrapper[4631]: E1129 04:28:50.580775 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Nov 29 04:28:50 crc kubenswrapper[4631]: E1129 04:28:50.581663 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-msxb8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-95trz_openstack(d802b479-6972-4f7e-825e-c07ca6b5a5fc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:28:50 crc kubenswrapper[4631]: E1129 04:28:50.582897 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" podUID="d802b479-6972-4f7e-825e-c07ca6b5a5fc"
Nov 29 04:28:50 crc kubenswrapper[4631]: I1129 04:28:50.715757 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 04:28:50 crc kubenswrapper[4631]: I1129 04:28:50.715826 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 04:28:50 crc kubenswrapper[4631]: I1129 04:28:50.715886 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd"
Nov 29 04:28:50 crc kubenswrapper[4631]: I1129 04:28:50.716760 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0d61c7b70ecd9c7737b4b7e588d56ad7e8044dda6cfe17bbe23a704a996d9bc8"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 04:28:50 crc kubenswrapper[4631]: I1129 04:28:50.716837 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://0d61c7b70ecd9c7737b4b7e588d56ad7e8044dda6cfe17bbe23a704a996d9bc8" gracePeriod=600
Nov 29 04:28:50 crc kubenswrapper[4631]: E1129 04:28:50.783341 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Nov 29 04:28:50 crc kubenswrapper[4631]: E1129 04:28:50.783518 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v96qp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-httvc_openstack(0c8005bc-207c-4fbc-90d2-01a2ee002123): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:28:50 crc kubenswrapper[4631]: E1129 04:28:50.784931 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" podUID="0c8005bc-207c-4fbc-90d2-01a2ee002123"
Nov 29 04:28:50 crc kubenswrapper[4631]: E1129 04:28:50.872270 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Nov 29 04:28:50 crc kubenswrapper[4631]: E1129 04:28:50.872764 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bzzsh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-l9xj8_openstack(dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:28:50 crc kubenswrapper[4631]: E1129 04:28:50.874037 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab"
Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.104799 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="0d61c7b70ecd9c7737b4b7e588d56ad7e8044dda6cfe17bbe23a704a996d9bc8" exitCode=0
Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.104951 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"0d61c7b70ecd9c7737b4b7e588d56ad7e8044dda6cfe17bbe23a704a996d9bc8"}
Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.104982 4631 scope.go:117] "RemoveContainer" containerID="d24bc233b5493c7d82c41dde646e52c2ccbd2abaf110835404b67654167e1ec2"
Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.264213 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.728008 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.732322 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.740775 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.749773 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.750769 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.777534 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.788758 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v96qp\" (UniqueName: \"kubernetes.io/projected/0c8005bc-207c-4fbc-90d2-01a2ee002123-kube-api-access-v96qp\") pod \"0c8005bc-207c-4fbc-90d2-01a2ee002123\" (UID: \"0c8005bc-207c-4fbc-90d2-01a2ee002123\") " Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.788820 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c8005bc-207c-4fbc-90d2-01a2ee002123-config\") pod \"0c8005bc-207c-4fbc-90d2-01a2ee002123\" (UID: \"0c8005bc-207c-4fbc-90d2-01a2ee002123\") " Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.788918 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-config\") pod \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.788966 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-dns-svc\") pod \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.789014 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msxb8\" (UniqueName: \"kubernetes.io/projected/d802b479-6972-4f7e-825e-c07ca6b5a5fc-kube-api-access-msxb8\") pod \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\" (UID: \"d802b479-6972-4f7e-825e-c07ca6b5a5fc\") " Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.790097 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c8005bc-207c-4fbc-90d2-01a2ee002123-config" (OuterVolumeSpecName: "config") pod "0c8005bc-207c-4fbc-90d2-01a2ee002123" (UID: "0c8005bc-207c-4fbc-90d2-01a2ee002123"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.790109 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-config" (OuterVolumeSpecName: "config") pod "d802b479-6972-4f7e-825e-c07ca6b5a5fc" (UID: "d802b479-6972-4f7e-825e-c07ca6b5a5fc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.790212 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d802b479-6972-4f7e-825e-c07ca6b5a5fc" (UID: "d802b479-6972-4f7e-825e-c07ca6b5a5fc"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.801535 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c8005bc-207c-4fbc-90d2-01a2ee002123-kube-api-access-v96qp" (OuterVolumeSpecName: "kube-api-access-v96qp") pod "0c8005bc-207c-4fbc-90d2-01a2ee002123" (UID: "0c8005bc-207c-4fbc-90d2-01a2ee002123"). InnerVolumeSpecName "kube-api-access-v96qp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.803810 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d802b479-6972-4f7e-825e-c07ca6b5a5fc-kube-api-access-msxb8" (OuterVolumeSpecName: "kube-api-access-msxb8") pod "d802b479-6972-4f7e-825e-c07ca6b5a5fc" (UID: "d802b479-6972-4f7e-825e-c07ca6b5a5fc"). InnerVolumeSpecName "kube-api-access-msxb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.856504 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fc5cp"] Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.890780 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.890824 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d802b479-6972-4f7e-825e-c07ca6b5a5fc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.890833 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msxb8\" (UniqueName: \"kubernetes.io/projected/d802b479-6972-4f7e-825e-c07ca6b5a5fc-kube-api-access-msxb8\") on node \"crc\" DevicePath \"\"" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.890846 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v96qp\" (UniqueName: \"kubernetes.io/projected/0c8005bc-207c-4fbc-90d2-01a2ee002123-kube-api-access-v96qp\") on node \"crc\" DevicePath \"\"" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.890855 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c8005bc-207c-4fbc-90d2-01a2ee002123-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:28:51 crc kubenswrapper[4631]: I1129 04:28:51.977156 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-kl2kj"] Nov 29 04:28:51 crc kubenswrapper[4631]: W1129 04:28:51.996181 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1334e52e_4dbd_4c2d_bd05_d19f59ef722b.slice/crio-6bfd67b07358775a6cce79822397cad7de0ce6dd28da3d92f3ab81307167ed0c WatchSource:0}: Error finding container 6bfd67b07358775a6cce79822397cad7de0ce6dd28da3d92f3ab81307167ed0c: Status 404 returned error can't find the container with id 6bfd67b07358775a6cce79822397cad7de0ce6dd28da3d92f3ab81307167ed0c Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.093790 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.113365 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" 
event={"ID":"d802b479-6972-4f7e-825e-c07ca6b5a5fc","Type":"ContainerDied","Data":"dc158dce9ad568a4bb8b73fe5739c0ceea6d4a4460e935182e63cc3ea0b90b75"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.113447 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-95trz" Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.120039 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kl2kj" event={"ID":"1334e52e-4dbd-4c2d-bd05-d19f59ef722b","Type":"ContainerStarted","Data":"6bfd67b07358775a6cce79822397cad7de0ce6dd28da3d92f3ab81307167ed0c"} Nov 29 04:28:52 crc kubenswrapper[4631]: W1129 04:28:52.121644 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5bd78ee0_c12e_4d6b_a47d_3652c3150c8d.slice/crio-e179c31ceea774a6e0b7e80ba824941211969e585ed978fa699d32f1f80c4c39 WatchSource:0}: Error finding container e179c31ceea774a6e0b7e80ba824941211969e585ed978fa699d32f1f80c4c39: Status 404 returned error can't find the container with id e179c31ceea774a6e0b7e80ba824941211969e585ed978fa699d32f1f80c4c39 Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.121656 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16","Type":"ContainerStarted","Data":"9177215614c42755ac896b51162104f21cf3a7478f14b83bc4b207fd98347a71"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.123441 4631 generic.go:334] "Generic (PLEG): container finished" podID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerID="147a742508142d21e49f839120315f1410b724603679fff06732e877a142dd77" exitCode=0 Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.123493 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" event={"ID":"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab","Type":"ContainerDied","Data":"147a742508142d21e49f839120315f1410b724603679fff06732e877a142dd77"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.130284 4631 generic.go:334] "Generic (PLEG): container finished" podID="0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" containerID="34b817d0be2f79651533271daae3f64ad7cde6db922cdc416f3b6a48c349fe59" exitCode=0 Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.130632 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" event={"ID":"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c","Type":"ContainerDied","Data":"34b817d0be2f79651533271daae3f64ad7cde6db922cdc416f3b6a48c349fe59"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.134527 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"b907ff8791c5156baa82f06284d01d372fbfcb2495bb80ab099417356b8d8104"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.144375 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"03cdf7d8-fc05-44d0-a4a9-b62239838053","Type":"ContainerStarted","Data":"0401ec8e0ea3dee8b6dd060585b89e95a9c61ac739a4dbe60b24da75aa69ee4b"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.149351 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"c9989c8e-3a12-49c9-89e0-d13778a4c3d4","Type":"ContainerStarted","Data":"5eda6edc1aa2303a063156c6d1edb0296ead052bd94e95e8666a67583e2adb63"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.159579 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" event={"ID":"0c8005bc-207c-4fbc-90d2-01a2ee002123","Type":"ContainerDied","Data":"71a8f262930967f27f8a6066a419d02d44146a65910560ae30dd14531e8f9d09"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.159819 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-httvc" Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.167101 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fc5cp" event={"ID":"9dac72cc-94dd-4863-92c6-99296142fafb","Type":"ContainerStarted","Data":"7b034e1828af8f349d84872face766179ba2c5c87576ea29b1e6a3ed9a047afc"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.171232 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2197f066-a879-4131-9e49-4d188a01db93","Type":"ContainerStarted","Data":"df3d52bf0d9b7b72abcaede9339f281d1bf7d966ac527420816fd3e643d78d6e"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.173153 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6","Type":"ContainerStarted","Data":"2dd0cf35aa5f76e54f1e1dbc2d43a400a10ec66174deda4cafe39c895fd02e5c"} Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.504799 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.599292 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-95trz"] Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.616388 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-95trz"] Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.637340 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-httvc"] Nov 29 04:28:52 crc kubenswrapper[4631]: I1129 04:28:52.651679 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-httvc"] Nov 29 04:28:53 crc kubenswrapper[4631]: I1129 04:28:53.184194 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d","Type":"ContainerStarted","Data":"e179c31ceea774a6e0b7e80ba824941211969e585ed978fa699d32f1f80c4c39"} Nov 29 04:28:53 crc kubenswrapper[4631]: I1129 04:28:53.186636 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fdd7deaa-61f9-48f4-96c2-6d10d8df4192","Type":"ContainerStarted","Data":"43c5e7fb8065a2ab8e17fc600e071e2631051fa004cfa2210ac324d8928cdde9"} Nov 29 04:28:53 crc kubenswrapper[4631]: I1129 04:28:53.190539 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6","Type":"ContainerStarted","Data":"5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396"} Nov 29 04:28:53 crc kubenswrapper[4631]: I1129 04:28:53.200856 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"c80339e5-63b2-451d-a7fb-25ef7a2fba6a","Type":"ContainerStarted","Data":"ca050029309e9d1a19a60a9681a8ae5cac9f662e43ae260b5ce4372a13126252"} Nov 29 04:28:53 crc kubenswrapper[4631]: I1129 04:28:53.207721 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" event={"ID":"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab","Type":"ContainerStarted","Data":"f69bc021c92fde0145a4fb454ec8a1e69d5e302c40019770fdd879b349dac3b8"} Nov 29 04:28:53 crc kubenswrapper[4631]: I1129 04:28:53.208011 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:28:53 crc kubenswrapper[4631]: I1129 04:28:53.243737 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c8005bc-207c-4fbc-90d2-01a2ee002123" path="/var/lib/kubelet/pods/0c8005bc-207c-4fbc-90d2-01a2ee002123/volumes" Nov 29 04:28:53 crc kubenswrapper[4631]: I1129 04:28:53.244120 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d802b479-6972-4f7e-825e-c07ca6b5a5fc" path="/var/lib/kubelet/pods/d802b479-6972-4f7e-825e-c07ca6b5a5fc/volumes" Nov 29 04:28:53 crc kubenswrapper[4631]: I1129 04:28:53.389823 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" podStartSLOduration=-9223372013.464968 podStartE2EDuration="23.389807535s" podCreationTimestamp="2025-11-29 04:28:30 +0000 UTC" firstStartedPulling="2025-11-29 04:28:31.530856425 +0000 UTC m=+1038.595359939" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:28:53.389677381 +0000 UTC m=+1060.454180895" watchObservedRunningTime="2025-11-29 04:28:53.389807535 +0000 UTC m=+1060.454311049" Nov 29 04:28:54 crc kubenswrapper[4631]: I1129 04:28:54.216188 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" event={"ID":"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c","Type":"ContainerStarted","Data":"be14ccb1f624a567184141af2c0d81fd4d343f751eb382db1a795aadceaf81fa"} Nov 29 04:28:55 crc kubenswrapper[4631]: I1129 04:28:55.227827 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:29:00 crc kubenswrapper[4631]: I1129 04:29:00.861604 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:29:00 crc kubenswrapper[4631]: I1129 04:29:00.903621 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" podStartSLOduration=10.779628077 podStartE2EDuration="29.903589759s" podCreationTimestamp="2025-11-29 04:28:31 +0000 UTC" firstStartedPulling="2025-11-29 04:28:31.892257074 +0000 UTC m=+1038.956760578" lastFinishedPulling="2025-11-29 04:28:51.016218746 +0000 UTC m=+1058.080722260" observedRunningTime="2025-11-29 04:28:54.235820868 +0000 UTC m=+1061.300324382" watchObservedRunningTime="2025-11-29 04:29:00.903589759 +0000 UTC m=+1067.968093313" Nov 29 04:29:01 crc kubenswrapper[4631]: I1129 04:29:01.392245 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:29:01 crc kubenswrapper[4631]: I1129 04:29:01.459002 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l9xj8"] Nov 29 04:29:01 crc kubenswrapper[4631]: I1129 04:29:01.459410 4631 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerName="dnsmasq-dns" containerID="cri-o://f69bc021c92fde0145a4fb454ec8a1e69d5e302c40019770fdd879b349dac3b8" gracePeriod=10 Nov 29 04:29:03 crc kubenswrapper[4631]: I1129 04:29:03.290269 4631 generic.go:334] "Generic (PLEG): container finished" podID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerID="f69bc021c92fde0145a4fb454ec8a1e69d5e302c40019770fdd879b349dac3b8" exitCode=0 Nov 29 04:29:03 crc kubenswrapper[4631]: I1129 04:29:03.290391 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" event={"ID":"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab","Type":"ContainerDied","Data":"f69bc021c92fde0145a4fb454ec8a1e69d5e302c40019770fdd879b349dac3b8"} Nov 29 04:29:03 crc kubenswrapper[4631]: I1129 04:29:03.953764 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-wc5tp"] Nov 29 04:29:03 crc kubenswrapper[4631]: I1129 04:29:03.954758 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:03 crc kubenswrapper[4631]: I1129 04:29:03.960620 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.025422 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wc5tp"] Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.094073 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-config\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.094395 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.094520 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-ovs-rundir\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.094599 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qchxp\" (UniqueName: \"kubernetes.io/projected/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-kube-api-access-qchxp\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.094694 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-combined-ca-bundle\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc 
kubenswrapper[4631]: I1129 04:29:04.094773 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-ovn-rundir\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.192603 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-fzlrr"] Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.196485 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qchxp\" (UniqueName: \"kubernetes.io/projected/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-kube-api-access-qchxp\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.196552 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-combined-ca-bundle\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.196590 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-ovn-rundir\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.197156 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-ovn-rundir\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.197478 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-config\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.197504 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.197558 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-ovs-rundir\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.197648 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-ovs-rundir\") pod \"ovn-controller-metrics-wc5tp\" (UID: 
\"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.198146 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-config\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.204216 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-combined-ca-bundle\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.213187 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-fzlrr"] Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.227244 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.229597 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qchxp\" (UniqueName: \"kubernetes.io/projected/3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf-kube-api-access-qchxp\") pod \"ovn-controller-metrics-wc5tp\" (UID: \"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf\") " pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.267995 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-wc5tp" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.299038 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-config\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.299906 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.299997 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.300107 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhh2b\" (UniqueName: \"kubernetes.io/projected/92f947c4-44fc-4664-85b8-2bed16ef0982-kube-api-access-nhh2b\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.402156 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-config\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.402198 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.402220 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.402265 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhh2b\" (UniqueName: \"kubernetes.io/projected/92f947c4-44fc-4664-85b8-2bed16ef0982-kube-api-access-nhh2b\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: E1129 04:29:04.402385 4631 configmap.go:193] Couldn't get configMap openstack/ovsdbserver-nb: object "openstack"/"ovsdbserver-nb" not registered Nov 29 04:29:04 crc kubenswrapper[4631]: E1129 04:29:04.402465 4631 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-ovsdbserver-nb podName:92f947c4-44fc-4664-85b8-2bed16ef0982 nodeName:}" failed. No retries permitted until 2025-11-29 04:29:04.902448112 +0000 UTC m=+1071.966951626 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "ovsdbserver-nb" (UniqueName: "kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-ovsdbserver-nb") pod "dnsmasq-dns-7fd796d7df-fzlrr" (UID: "92f947c4-44fc-4664-85b8-2bed16ef0982") : object "openstack"/"ovsdbserver-nb" not registered Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.403117 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-config\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.403229 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.420906 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhh2b\" (UniqueName: \"kubernetes.io/projected/92f947c4-44fc-4664-85b8-2bed16ef0982-kube-api-access-nhh2b\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.594102 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-fzlrr"] Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.623489 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f8659"] Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.625074 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.628714 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.629102 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.633668 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f8659"] Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.707355 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.707412 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-config\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.707483 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.707567 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.707625 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-826rj\" (UniqueName: \"kubernetes.io/projected/03f0a309-9927-471e-b4ff-caf759d1b050-kube-api-access-826rj\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.809061 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.809445 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-826rj\" (UniqueName: \"kubernetes.io/projected/03f0a309-9927-471e-b4ff-caf759d1b050-kube-api-access-826rj\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.809615 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.809991 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.810402 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.810577 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-config\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.810838 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.811493 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-config\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.811576 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.834994 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-826rj\" (UniqueName: \"kubernetes.io/projected/03f0a309-9927-471e-b4ff-caf759d1b050-kube-api-access-826rj\") pod \"dnsmasq-dns-86db49b7ff-f8659\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.892833 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: E1129 04:29:04.893477 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" podUID="92f947c4-44fc-4664-85b8-2bed16ef0982" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.913260 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.914057 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-fzlrr\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:04 crc kubenswrapper[4631]: I1129 04:29:04.948484 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.306675 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.321459 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.422079 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhh2b\" (UniqueName: \"kubernetes.io/projected/92f947c4-44fc-4664-85b8-2bed16ef0982-kube-api-access-nhh2b\") pod \"92f947c4-44fc-4664-85b8-2bed16ef0982\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.422213 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-dns-svc\") pod \"92f947c4-44fc-4664-85b8-2bed16ef0982\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.422241 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-config\") pod \"92f947c4-44fc-4664-85b8-2bed16ef0982\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.422297 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-ovsdbserver-nb\") pod \"92f947c4-44fc-4664-85b8-2bed16ef0982\" (UID: \"92f947c4-44fc-4664-85b8-2bed16ef0982\") " Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.422835 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "92f947c4-44fc-4664-85b8-2bed16ef0982" (UID: "92f947c4-44fc-4664-85b8-2bed16ef0982"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.423431 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-config" (OuterVolumeSpecName: "config") pod "92f947c4-44fc-4664-85b8-2bed16ef0982" (UID: "92f947c4-44fc-4664-85b8-2bed16ef0982"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.423810 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "92f947c4-44fc-4664-85b8-2bed16ef0982" (UID: "92f947c4-44fc-4664-85b8-2bed16ef0982"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.427634 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92f947c4-44fc-4664-85b8-2bed16ef0982-kube-api-access-nhh2b" (OuterVolumeSpecName: "kube-api-access-nhh2b") pod "92f947c4-44fc-4664-85b8-2bed16ef0982" (UID: "92f947c4-44fc-4664-85b8-2bed16ef0982"). InnerVolumeSpecName "kube-api-access-nhh2b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.523871 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhh2b\" (UniqueName: \"kubernetes.io/projected/92f947c4-44fc-4664-85b8-2bed16ef0982-kube-api-access-nhh2b\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.523922 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.523944 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.523961 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92f947c4-44fc-4664-85b8-2bed16ef0982-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:05 crc kubenswrapper[4631]: I1129 04:29:05.860376 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.96:5353: connect: connection refused" Nov 29 04:29:06 crc kubenswrapper[4631]: I1129 04:29:06.316225 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-fzlrr" Nov 29 04:29:06 crc kubenswrapper[4631]: I1129 04:29:06.362323 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-fzlrr"] Nov 29 04:29:06 crc kubenswrapper[4631]: I1129 04:29:06.370756 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-fzlrr"] Nov 29 04:29:07 crc kubenswrapper[4631]: I1129 04:29:07.256921 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92f947c4-44fc-4664-85b8-2bed16ef0982" path="/var/lib/kubelet/pods/92f947c4-44fc-4664-85b8-2bed16ef0982/volumes" Nov 29 04:29:10 crc kubenswrapper[4631]: E1129 04:29:10.615161 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified" Nov 29 04:29:10 crc kubenswrapper[4631]: E1129 04:29:10.615855 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:ovsdb-server-init,Image:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,Command:[/usr/local/bin/container-scripts/init-ovsdb-server.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh65dh5cdh666h94h5c6h98h5bdh694hbdh5fbh5f9h5c6hcch698h5b5hcbh55h664h658h664h66hcdh645h67hcch6ch5b4h5d4h6fh65dh555q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-ovs,ReadOnly:false,MountPath:/etc/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log,ReadOnly:false,MountPath:/var/log/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-lib,ReadOnly:false,MountPath:/var/lib/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fqc4d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ovs-kl2kj_openstack(1334e52e-4dbd-4c2d-bd05-d19f59ef722b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:29:10 crc kubenswrapper[4631]: E1129 04:29:10.617025 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context 
canceled\"" pod="openstack/ovn-controller-ovs-kl2kj" podUID="1334e52e-4dbd-4c2d-bd05-d19f59ef722b" Nov 29 04:29:10 crc kubenswrapper[4631]: E1129 04:29:10.947380 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified" Nov 29 04:29:10 crc kubenswrapper[4631]: E1129 04:29:10.947559 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key --ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55fh65dh5cdh666h94h5c6h98h5bdh694hbdh5fbh5f9h5c6hcch698h5b5hcbh55h664h658h664h66hcdh645h67hcch6ch5b4h5d4h6fh65dh555q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nt4p9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN 
SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-fc5cp_openstack(9dac72cc-94dd-4863-92c6-99296142fafb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:29:10 crc kubenswrapper[4631]: E1129 04:29:10.949493 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-fc5cp" podUID="9dac72cc-94dd-4863-92c6-99296142fafb" Nov 29 04:29:11 crc kubenswrapper[4631]: E1129 04:29:11.215155 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified" Nov 29 04:29:11 crc kubenswrapper[4631]: E1129 04:29:11.215344 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovsdbserver-nb,Image:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f8h5f5h545h657h8ch54dh7chc5h68bh8fh5ffh55fh696h594h87h565h58dh577h9fh5c5h57h668h5bch56fh698hbfh667h55dh5f4hc4hd8h575q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-nb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s527k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof 
ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(5bd78ee0-c12e-4d6b-a47d-3652c3150c8d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:29:11 crc kubenswrapper[4631]: E1129 04:29:11.350928 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified\\\"\"" pod="openstack/ovn-controller-fc5cp" podUID="9dac72cc-94dd-4863-92c6-99296142fafb" Nov 29 04:29:11 crc kubenswrapper[4631]: E1129 04:29:11.351703 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\\\"\"" pod="openstack/ovn-controller-ovs-kl2kj" podUID="1334e52e-4dbd-4c2d-bd05-d19f59ef722b" Nov 29 04:29:11 crc kubenswrapper[4631]: E1129 04:29:11.638422 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified" Nov 29 04:29:11 crc kubenswrapper[4631]: E1129 04:29:11.638593 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:ovsdbserver-sb,Image:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nbh5cch666h558h5f9h584h645h66dh59ch598h56dh56h75h659h5dbh8dhbh566h89h556h7fh5bchb7h548h664h67bhdch55dh5d8h67ch7fh55q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-sb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-67wfh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
ovsdbserver-sb-0_openstack(c80339e5-63b2-451d-a7fb-25ef7a2fba6a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.721804 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.827049 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzzsh\" (UniqueName: \"kubernetes.io/projected/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-kube-api-access-bzzsh\") pod \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.827535 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-dns-svc\") pod \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.827680 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-config\") pod \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\" (UID: \"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab\") " Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.845403 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-kube-api-access-bzzsh" (OuterVolumeSpecName: "kube-api-access-bzzsh") pod "dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" (UID: "dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab"). InnerVolumeSpecName "kube-api-access-bzzsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.883145 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-config" (OuterVolumeSpecName: "config") pod "dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" (UID: "dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.911943 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" (UID: "dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.931061 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.931086 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzzsh\" (UniqueName: \"kubernetes.io/projected/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-kube-api-access-bzzsh\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:11 crc kubenswrapper[4631]: I1129 04:29:11.931097 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:12 crc kubenswrapper[4631]: I1129 04:29:12.251029 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f8659"] Nov 29 04:29:12 crc kubenswrapper[4631]: I1129 04:29:12.309243 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wc5tp"] Nov 29 04:29:12 crc kubenswrapper[4631]: I1129 04:29:12.366581 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" event={"ID":"dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab","Type":"ContainerDied","Data":"f10e19b907a1c08c17a3e19fcaf1c461fc1016922448c65726a4bc0ef99acd96"} Nov 29 04:29:12 crc kubenswrapper[4631]: I1129 04:29:12.366811 4631 scope.go:117] "RemoveContainer" containerID="f69bc021c92fde0145a4fb454ec8a1e69d5e302c40019770fdd879b349dac3b8" Nov 29 04:29:12 crc kubenswrapper[4631]: I1129 04:29:12.366856 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" Nov 29 04:29:12 crc kubenswrapper[4631]: I1129 04:29:12.392358 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l9xj8"] Nov 29 04:29:12 crc kubenswrapper[4631]: I1129 04:29:12.407746 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-l9xj8"] Nov 29 04:29:12 crc kubenswrapper[4631]: W1129 04:29:12.910940 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f2faef4_4711_4b58_9dd3_ee5bfc76dfaf.slice/crio-353d99d7164ee4ae746eda99671e5d234d2be5058e0e60b6c60f4b96f93209c7 WatchSource:0}: Error finding container 353d99d7164ee4ae746eda99671e5d234d2be5058e0e60b6c60f4b96f93209c7: Status 404 returned error can't find the container with id 353d99d7164ee4ae746eda99671e5d234d2be5058e0e60b6c60f4b96f93209c7 Nov 29 04:29:12 crc kubenswrapper[4631]: W1129 04:29:12.912229 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03f0a309_9927_471e_b4ff_caf759d1b050.slice/crio-ae5e80ad8fe8f957c4048300f4743b1897b371f7ef2a32967c7e8308dbd237ae WatchSource:0}: Error finding container ae5e80ad8fe8f957c4048300f4743b1897b371f7ef2a32967c7e8308dbd237ae: Status 404 returned error can't find the container with id ae5e80ad8fe8f957c4048300f4743b1897b371f7ef2a32967c7e8308dbd237ae Nov 29 04:29:12 crc kubenswrapper[4631]: I1129 04:29:12.922120 4631 scope.go:117] "RemoveContainer" containerID="147a742508142d21e49f839120315f1410b724603679fff06732e877a142dd77" Nov 29 04:29:12 crc kubenswrapper[4631]: E1129 04:29:12.984122 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 29 04:29:12 crc kubenswrapper[4631]: E1129 04:29:12.984184 4631 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 29 04:29:12 crc kubenswrapper[4631]: E1129 04:29:12.984350 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9mcw6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(03cdf7d8-fc05-44d0-a4a9-b62239838053): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 04:29:12 crc kubenswrapper[4631]: E1129 04:29:12.989511 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="03cdf7d8-fc05-44d0-a4a9-b62239838053" Nov 29 04:29:13 crc kubenswrapper[4631]: I1129 04:29:13.235288 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" path="/var/lib/kubelet/pods/dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab/volumes" Nov 29 04:29:13 crc kubenswrapper[4631]: I1129 04:29:13.384291 4631 generic.go:334] "Generic (PLEG): container finished" podID="03f0a309-9927-471e-b4ff-caf759d1b050" containerID="5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b" exitCode=0 Nov 29 04:29:13 crc kubenswrapper[4631]: I1129 04:29:13.384389 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" event={"ID":"03f0a309-9927-471e-b4ff-caf759d1b050","Type":"ContainerDied","Data":"5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b"} Nov 29 04:29:13 crc kubenswrapper[4631]: I1129 04:29:13.384414 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" event={"ID":"03f0a309-9927-471e-b4ff-caf759d1b050","Type":"ContainerStarted","Data":"ae5e80ad8fe8f957c4048300f4743b1897b371f7ef2a32967c7e8308dbd237ae"} Nov 29 04:29:13 crc kubenswrapper[4631]: I1129 04:29:13.388137 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2197f066-a879-4131-9e49-4d188a01db93","Type":"ContainerStarted","Data":"b4bbc43951aae43eb95ab7576646f98aa09cbe7d41f913924ca6c9ba3c469970"} Nov 29 04:29:13 crc kubenswrapper[4631]: I1129 04:29:13.395963 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c9989c8e-3a12-49c9-89e0-d13778a4c3d4","Type":"ContainerStarted","Data":"98b559259e856535a4b29563af9d41fd470616d3c3cf63e031039e4ce595d22d"} Nov 29 04:29:13 crc kubenswrapper[4631]: I1129 04:29:13.401234 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a706cd5e-48d4-44a0-b8f5-b97ac5e39a16","Type":"ContainerStarted","Data":"29654c53f8e10da86edf23c5ebf215381b77760fa554372482d1d3dc52cb662c"} Nov 29 04:29:13 crc 
kubenswrapper[4631]: I1129 04:29:13.401965 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 29 04:29:13 crc kubenswrapper[4631]: I1129 04:29:13.411702 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wc5tp" event={"ID":"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf","Type":"ContainerStarted","Data":"353d99d7164ee4ae746eda99671e5d234d2be5058e0e60b6c60f4b96f93209c7"} Nov 29 04:29:13 crc kubenswrapper[4631]: E1129 04:29:13.414556 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="03cdf7d8-fc05-44d0-a4a9-b62239838053" Nov 29 04:29:13 crc kubenswrapper[4631]: I1129 04:29:13.445232 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=18.869000678 podStartE2EDuration="38.445217507s" podCreationTimestamp="2025-11-29 04:28:35 +0000 UTC" firstStartedPulling="2025-11-29 04:28:51.744469235 +0000 UTC m=+1058.808972749" lastFinishedPulling="2025-11-29 04:29:11.320686064 +0000 UTC m=+1078.385189578" observedRunningTime="2025-11-29 04:29:13.44247058 +0000 UTC m=+1080.506974094" watchObservedRunningTime="2025-11-29 04:29:13.445217507 +0000 UTC m=+1080.509721021" Nov 29 04:29:15 crc kubenswrapper[4631]: I1129 04:29:15.860637 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-666b6646f7-l9xj8" podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.96:5353: i/o timeout" Nov 29 04:29:16 crc kubenswrapper[4631]: E1129 04:29:16.708625 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="5bd78ee0-c12e-4d6b-a47d-3652c3150c8d" Nov 29 04:29:16 crc kubenswrapper[4631]: E1129 04:29:16.709689 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-sb-0" podUID="c80339e5-63b2-451d-a7fb-25ef7a2fba6a" Nov 29 04:29:17 crc kubenswrapper[4631]: I1129 04:29:17.464171 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wc5tp" event={"ID":"3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf","Type":"ContainerStarted","Data":"58eb276dd8851ce50faea4ceec8f66e7b8a1e357737ae9ed4a96be2b245bac17"} Nov 29 04:29:17 crc kubenswrapper[4631]: I1129 04:29:17.466859 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d","Type":"ContainerStarted","Data":"fdbd8f21057b36dca2fedcecfc4e58f96de965f468db104a1a0cf66408cf109f"} Nov 29 04:29:17 crc kubenswrapper[4631]: E1129 04:29:17.468911 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="5bd78ee0-c12e-4d6b-a47d-3652c3150c8d" Nov 29 04:29:17 crc kubenswrapper[4631]: I1129 04:29:17.469851 4631 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" event={"ID":"03f0a309-9927-471e-b4ff-caf759d1b050","Type":"ContainerStarted","Data":"3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb"} Nov 29 04:29:17 crc kubenswrapper[4631]: I1129 04:29:17.470028 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:17 crc kubenswrapper[4631]: I1129 04:29:17.472263 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"c80339e5-63b2-451d-a7fb-25ef7a2fba6a","Type":"ContainerStarted","Data":"79f7962791454508f57c04fb4f6c8ff640228d7709425988b54eb094e1806c07"} Nov 29 04:29:17 crc kubenswrapper[4631]: E1129 04:29:17.474367 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="c80339e5-63b2-451d-a7fb-25ef7a2fba6a" Nov 29 04:29:17 crc kubenswrapper[4631]: I1129 04:29:17.492526 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-wc5tp" podStartSLOduration=11.097330865 podStartE2EDuration="14.492503276s" podCreationTimestamp="2025-11-29 04:29:03 +0000 UTC" firstStartedPulling="2025-11-29 04:29:12.922940427 +0000 UTC m=+1079.987443941" lastFinishedPulling="2025-11-29 04:29:16.318112828 +0000 UTC m=+1083.382616352" observedRunningTime="2025-11-29 04:29:17.490985659 +0000 UTC m=+1084.555489213" watchObservedRunningTime="2025-11-29 04:29:17.492503276 +0000 UTC m=+1084.557006830" Nov 29 04:29:17 crc kubenswrapper[4631]: I1129 04:29:17.558564 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" podStartSLOduration=13.558542312 podStartE2EDuration="13.558542312s" podCreationTimestamp="2025-11-29 04:29:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:29:17.55640478 +0000 UTC m=+1084.620908304" watchObservedRunningTime="2025-11-29 04:29:17.558542312 +0000 UTC m=+1084.623045836" Nov 29 04:29:18 crc kubenswrapper[4631]: E1129 04:29:18.485691 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="c80339e5-63b2-451d-a7fb-25ef7a2fba6a" Nov 29 04:29:18 crc kubenswrapper[4631]: E1129 04:29:18.485568 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="5bd78ee0-c12e-4d6b-a47d-3652c3150c8d" Nov 29 04:29:19 crc kubenswrapper[4631]: E1129 04:29:19.062866 4631 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2197f066_a879_4131_9e49_4d188a01db93.slice/crio-b4bbc43951aae43eb95ab7576646f98aa09cbe7d41f913924ca6c9ba3c469970.scope\": RecentStats: unable to find data in memory cache]" Nov 29 04:29:19 crc kubenswrapper[4631]: I1129 04:29:19.494592 
4631 generic.go:334] "Generic (PLEG): container finished" podID="2197f066-a879-4131-9e49-4d188a01db93" containerID="b4bbc43951aae43eb95ab7576646f98aa09cbe7d41f913924ca6c9ba3c469970" exitCode=0 Nov 29 04:29:19 crc kubenswrapper[4631]: I1129 04:29:19.494824 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2197f066-a879-4131-9e49-4d188a01db93","Type":"ContainerDied","Data":"b4bbc43951aae43eb95ab7576646f98aa09cbe7d41f913924ca6c9ba3c469970"} Nov 29 04:29:19 crc kubenswrapper[4631]: I1129 04:29:19.497580 4631 generic.go:334] "Generic (PLEG): container finished" podID="c9989c8e-3a12-49c9-89e0-d13778a4c3d4" containerID="98b559259e856535a4b29563af9d41fd470616d3c3cf63e031039e4ce595d22d" exitCode=0 Nov 29 04:29:19 crc kubenswrapper[4631]: I1129 04:29:19.497633 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c9989c8e-3a12-49c9-89e0-d13778a4c3d4","Type":"ContainerDied","Data":"98b559259e856535a4b29563af9d41fd470616d3c3cf63e031039e4ce595d22d"} Nov 29 04:29:20 crc kubenswrapper[4631]: I1129 04:29:20.435461 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 29 04:29:20 crc kubenswrapper[4631]: I1129 04:29:20.505270 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2197f066-a879-4131-9e49-4d188a01db93","Type":"ContainerStarted","Data":"9bc8e7208ef61bd59cd9ccd014424cfba40b16b7d026cc2cedeaaa49df01edce"} Nov 29 04:29:20 crc kubenswrapper[4631]: I1129 04:29:20.507833 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c9989c8e-3a12-49c9-89e0-d13778a4c3d4","Type":"ContainerStarted","Data":"efdf261320991844299f4fa31334cc59c52790975223b272c7f5aa3e89630c26"} Nov 29 04:29:20 crc kubenswrapper[4631]: I1129 04:29:20.535352 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=26.958651303 podStartE2EDuration="48.535326299s" podCreationTimestamp="2025-11-29 04:28:32 +0000 UTC" firstStartedPulling="2025-11-29 04:28:51.324709747 +0000 UTC m=+1058.389213261" lastFinishedPulling="2025-11-29 04:29:12.901384743 +0000 UTC m=+1079.965888257" observedRunningTime="2025-11-29 04:29:20.527367875 +0000 UTC m=+1087.591871389" watchObservedRunningTime="2025-11-29 04:29:20.535326299 +0000 UTC m=+1087.599829813" Nov 29 04:29:20 crc kubenswrapper[4631]: I1129 04:29:20.573585 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=27.988107455 podStartE2EDuration="47.573573079s" podCreationTimestamp="2025-11-29 04:28:33 +0000 UTC" firstStartedPulling="2025-11-29 04:28:51.736985313 +0000 UTC m=+1058.801488827" lastFinishedPulling="2025-11-29 04:29:11.322450937 +0000 UTC m=+1078.386954451" observedRunningTime="2025-11-29 04:29:20.566658601 +0000 UTC m=+1087.631162115" watchObservedRunningTime="2025-11-29 04:29:20.573573079 +0000 UTC m=+1087.638076593" Nov 29 04:29:23 crc kubenswrapper[4631]: I1129 04:29:23.924658 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 29 04:29:23 crc kubenswrapper[4631]: I1129 04:29:23.924923 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 29 04:29:24 crc kubenswrapper[4631]: I1129 04:29:24.950650 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:24 crc kubenswrapper[4631]: I1129 04:29:24.995805 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-khw5g"] Nov 29 04:29:24 crc kubenswrapper[4631]: I1129 04:29:24.996068 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" podUID="0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" containerName="dnsmasq-dns" containerID="cri-o://be14ccb1f624a567184141af2c0d81fd4d343f751eb382db1a795aadceaf81fa" gracePeriod=10 Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.551589 4631 generic.go:334] "Generic (PLEG): container finished" podID="0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" containerID="be14ccb1f624a567184141af2c0d81fd4d343f751eb382db1a795aadceaf81fa" exitCode=0 Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.551672 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" event={"ID":"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c","Type":"ContainerDied","Data":"be14ccb1f624a567184141af2c0d81fd4d343f751eb382db1a795aadceaf81fa"} Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.554557 4631 generic.go:334] "Generic (PLEG): container finished" podID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" containerID="43c5e7fb8065a2ab8e17fc600e071e2631051fa004cfa2210ac324d8928cdde9" exitCode=0 Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.554672 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fdd7deaa-61f9-48f4-96c2-6d10d8df4192","Type":"ContainerDied","Data":"43c5e7fb8065a2ab8e17fc600e071e2631051fa004cfa2210ac324d8928cdde9"} Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.554738 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.554758 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.558359 4631 generic.go:334] "Generic (PLEG): container finished" podID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" containerID="5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396" exitCode=0 Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.558408 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6","Type":"ContainerDied","Data":"5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396"} Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.724431 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.852824 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hfc8\" (UniqueName: \"kubernetes.io/projected/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-kube-api-access-6hfc8\") pod \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.853149 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-config\") pod \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.853207 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-dns-svc\") pod \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\" (UID: \"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c\") " Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.868625 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-kube-api-access-6hfc8" (OuterVolumeSpecName: "kube-api-access-6hfc8") pod "0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" (UID: "0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c"). InnerVolumeSpecName "kube-api-access-6hfc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.903607 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-config" (OuterVolumeSpecName: "config") pod "0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" (UID: "0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.904381 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" (UID: "0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.955419 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hfc8\" (UniqueName: \"kubernetes.io/projected/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-kube-api-access-6hfc8\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.956311 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:25 crc kubenswrapper[4631]: I1129 04:29:25.956389 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.332225 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.408717 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.565316 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6","Type":"ContainerStarted","Data":"afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d"} Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.565709 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.570766 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.570794 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-khw5g" event={"ID":"0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c","Type":"ContainerDied","Data":"075ff730e48c703a10615102d42d41b16ff377c32cf7b5484331df4340b06175"} Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.570855 4631 scope.go:117] "RemoveContainer" containerID="be14ccb1f624a567184141af2c0d81fd4d343f751eb382db1a795aadceaf81fa" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.579586 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fc5cp" event={"ID":"9dac72cc-94dd-4863-92c6-99296142fafb","Type":"ContainerStarted","Data":"967ba64993c3e9c1960e50100fd96052941bffc26afe99bbd5c62b0f8853d33c"} Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.580249 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-fc5cp" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.583151 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fdd7deaa-61f9-48f4-96c2-6d10d8df4192","Type":"ContainerStarted","Data":"96b93f6eadde751b36470a5b4c837bf00d52b3177d3155bb316b665bf4fea62e"} Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.597888 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.600998 4631 scope.go:117] "RemoveContainer" containerID="34b817d0be2f79651533271daae3f64ad7cde6db922cdc416f3b6a48c349fe59" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.609862 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=55.609844124 podStartE2EDuration="55.609844124s" podCreationTimestamp="2025-11-29 04:28:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:29:26.602223939 +0000 UTC m=+1093.666727453" watchObservedRunningTime="2025-11-29 04:29:26.609844124 +0000 UTC m=+1093.674347638" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.665856 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=43.515435175 podStartE2EDuration="56.665839606s" podCreationTimestamp="2025-11-29 04:28:30 +0000 UTC" firstStartedPulling="2025-11-29 04:28:37.793749072 +0000 UTC m=+1044.858252586" lastFinishedPulling="2025-11-29 04:28:50.944153503 +0000 UTC m=+1058.008657017" observedRunningTime="2025-11-29 04:29:26.627612686 +0000 UTC m=+1093.692116210" watchObservedRunningTime="2025-11-29 04:29:26.665839606 +0000 UTC m=+1093.730343120" Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.679815 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-khw5g"] Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.688381 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-khw5g"] Nov 29 04:29:26 crc kubenswrapper[4631]: I1129 04:29:26.721534 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-fc5cp" podStartSLOduration=12.873530721 podStartE2EDuration="46.72151354s" podCreationTimestamp="2025-11-29 04:28:40 +0000 UTC" firstStartedPulling="2025-11-29 04:28:51.866929033 +0000 UTC 
m=+1058.931432547" lastFinishedPulling="2025-11-29 04:29:25.714911852 +0000 UTC m=+1092.779415366" observedRunningTime="2025-11-29 04:29:26.674074356 +0000 UTC m=+1093.738577870" watchObservedRunningTime="2025-11-29 04:29:26.72151354 +0000 UTC m=+1093.786017044" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.226357 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" path="/var/lib/kubelet/pods/0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c/volumes" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.593054 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"03cdf7d8-fc05-44d0-a4a9-b62239838053","Type":"ContainerStarted","Data":"b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4"} Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.593539 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.595597 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kl2kj" event={"ID":"1334e52e-4dbd-4c2d-bd05-d19f59ef722b","Type":"ContainerStarted","Data":"77f6db0b483f13ba6c9895e3b1cb4b0b25d07af5b964394f1e91225f3277c610"} Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.607708 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=15.627764324 podStartE2EDuration="50.607690379s" podCreationTimestamp="2025-11-29 04:28:37 +0000 UTC" firstStartedPulling="2025-11-29 04:28:51.747523119 +0000 UTC m=+1058.812026633" lastFinishedPulling="2025-11-29 04:29:26.727449174 +0000 UTC m=+1093.791952688" observedRunningTime="2025-11-29 04:29:27.606387678 +0000 UTC m=+1094.670891192" watchObservedRunningTime="2025-11-29 04:29:27.607690379 +0000 UTC m=+1094.672193893" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.888541 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-g5w92"] Nov 29 04:29:27 crc kubenswrapper[4631]: E1129 04:29:27.888844 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" containerName="dnsmasq-dns" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.888857 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" containerName="dnsmasq-dns" Nov 29 04:29:27 crc kubenswrapper[4631]: E1129 04:29:27.888868 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" containerName="init" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.888873 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" containerName="init" Nov 29 04:29:27 crc kubenswrapper[4631]: E1129 04:29:27.888886 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerName="dnsmasq-dns" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.888893 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerName="dnsmasq-dns" Nov 29 04:29:27 crc kubenswrapper[4631]: E1129 04:29:27.888903 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerName="init" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.888908 4631 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerName="init" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.889050 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fcf8fb5-3cf8-4ef2-811f-6d48f8748f5c" containerName="dnsmasq-dns" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.889067 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd39dbf5-1277-44a2-9b7d-9af44dbfd0ab" containerName="dnsmasq-dns" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.889824 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.907046 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-g5w92"] Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.913933 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.913973 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-dns-svc\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.913991 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.914010 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-config\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:27 crc kubenswrapper[4631]: I1129 04:29:27.914229 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99w4p\" (UniqueName: \"kubernetes.io/projected/093fbd07-3966-4e87-bc8b-f007e875937f-kube-api-access-99w4p\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.015958 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.016220 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-dns-svc\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") 
" pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.016243 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.016266 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-config\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.016343 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99w4p\" (UniqueName: \"kubernetes.io/projected/093fbd07-3966-4e87-bc8b-f007e875937f-kube-api-access-99w4p\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.016923 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.017122 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.017214 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-dns-svc\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.017362 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-config\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.057808 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99w4p\" (UniqueName: \"kubernetes.io/projected/093fbd07-3966-4e87-bc8b-f007e875937f-kube-api-access-99w4p\") pod \"dnsmasq-dns-698758b865-g5w92\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.204026 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:28 crc kubenswrapper[4631]: I1129 04:29:28.761081 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-g5w92"] Nov 29 04:29:28 crc kubenswrapper[4631]: W1129 04:29:28.766695 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod093fbd07_3966_4e87_bc8b_f007e875937f.slice/crio-c080249d24027338138d511688594ad0eacf5913b5dcbd18e63e0ea28bfbdd96 WatchSource:0}: Error finding container c080249d24027338138d511688594ad0eacf5913b5dcbd18e63e0ea28bfbdd96: Status 404 returned error can't find the container with id c080249d24027338138d511688594ad0eacf5913b5dcbd18e63e0ea28bfbdd96 Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.033011 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.044889 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.055129 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.055261 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.055269 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.055671 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-5f7l6" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.074460 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.236255 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.236357 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.236390 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-cache\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.236423 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-lock\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.236507 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvxqv\" 
(UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-kube-api-access-kvxqv\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.337711 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.337798 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-cache\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.337844 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-lock\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.337898 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvxqv\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-kube-api-access-kvxqv\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: E1129 04:29:29.337937 4631 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 29 04:29:29 crc kubenswrapper[4631]: E1129 04:29:29.337973 4631 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 29 04:29:29 crc kubenswrapper[4631]: E1129 04:29:29.338032 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift podName:874bb6b3-16cb-4d17-bf8b-6d3593d727d0 nodeName:}" failed. No retries permitted until 2025-11-29 04:29:29.838013575 +0000 UTC m=+1096.902517089 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift") pod "swift-storage-0" (UID: "874bb6b3-16cb-4d17-bf8b-6d3593d727d0") : configmap "swift-ring-files" not found Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.337952 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.338281 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-lock\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.338321 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.338380 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-cache\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.356199 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvxqv\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-kube-api-access-kvxqv\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.413513 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.555529 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-s8gk2"] Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.556587 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.563319 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.563609 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.563727 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.574426 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-s8gk2"] Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.613554 4631 generic.go:334] "Generic (PLEG): container finished" podID="1334e52e-4dbd-4c2d-bd05-d19f59ef722b" containerID="77f6db0b483f13ba6c9895e3b1cb4b0b25d07af5b964394f1e91225f3277c610" exitCode=0 Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.613637 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kl2kj" event={"ID":"1334e52e-4dbd-4c2d-bd05-d19f59ef722b","Type":"ContainerDied","Data":"77f6db0b483f13ba6c9895e3b1cb4b0b25d07af5b964394f1e91225f3277c610"} Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.615872 4631 generic.go:334] "Generic (PLEG): container finished" podID="093fbd07-3966-4e87-bc8b-f007e875937f" containerID="20fb52cd87cc20708af976220cd7b202ca9e41f53aa76a84e56bb66a8ff19bb7" exitCode=0 Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.615901 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-g5w92" event={"ID":"093fbd07-3966-4e87-bc8b-f007e875937f","Type":"ContainerDied","Data":"20fb52cd87cc20708af976220cd7b202ca9e41f53aa76a84e56bb66a8ff19bb7"} Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.615923 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-g5w92" event={"ID":"093fbd07-3966-4e87-bc8b-f007e875937f","Type":"ContainerStarted","Data":"c080249d24027338138d511688594ad0eacf5913b5dcbd18e63e0ea28bfbdd96"} Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.744474 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-combined-ca-bundle\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.744780 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlwr7\" (UniqueName: \"kubernetes.io/projected/cfd7f275-e7d1-4239-b55a-b0566664e6bf-kube-api-access-jlwr7\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.744806 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-ring-data-devices\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.744840 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-scripts\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.744930 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-dispersionconf\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.744957 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-swiftconf\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.745005 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cfd7f275-e7d1-4239-b55a-b0566664e6bf-etc-swift\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.845943 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-combined-ca-bundle\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.845982 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlwr7\" (UniqueName: \"kubernetes.io/projected/cfd7f275-e7d1-4239-b55a-b0566664e6bf-kube-api-access-jlwr7\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.846011 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-ring-data-devices\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.846045 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-scripts\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.846067 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.846098 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" 
(UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-dispersionconf\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.846121 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-swiftconf\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.846160 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cfd7f275-e7d1-4239-b55a-b0566664e6bf-etc-swift\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.846497 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cfd7f275-e7d1-4239-b55a-b0566664e6bf-etc-swift\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: E1129 04:29:29.846611 4631 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 29 04:29:29 crc kubenswrapper[4631]: E1129 04:29:29.846625 4631 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 29 04:29:29 crc kubenswrapper[4631]: E1129 04:29:29.846659 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift podName:874bb6b3-16cb-4d17-bf8b-6d3593d727d0 nodeName:}" failed. No retries permitted until 2025-11-29 04:29:30.846646504 +0000 UTC m=+1097.911150018 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift") pod "swift-storage-0" (UID: "874bb6b3-16cb-4d17-bf8b-6d3593d727d0") : configmap "swift-ring-files" not found Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.847035 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-scripts\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.847486 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-ring-data-devices\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.851664 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-combined-ca-bundle\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.851729 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-swiftconf\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.852437 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-dispersionconf\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.865205 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlwr7\" (UniqueName: \"kubernetes.io/projected/cfd7f275-e7d1-4239-b55a-b0566664e6bf-kube-api-access-jlwr7\") pod \"swift-ring-rebalance-s8gk2\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:29 crc kubenswrapper[4631]: I1129 04:29:29.871879 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.036857 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.173562 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.361544 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-s8gk2"] Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.625765 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-g5w92" event={"ID":"093fbd07-3966-4e87-bc8b-f007e875937f","Type":"ContainerStarted","Data":"0dad95f4d76c1be8f557c91bbee84bc400d1680b83812ca4f2bc9b81509f19ea"} Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.626697 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.628227 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kl2kj" event={"ID":"1334e52e-4dbd-4c2d-bd05-d19f59ef722b","Type":"ContainerStarted","Data":"ef63652f358a4a4366f0c037649c495058ff9a65a828e14e53a48753eea1bf0d"} Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.628266 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-kl2kj" event={"ID":"1334e52e-4dbd-4c2d-bd05-d19f59ef722b","Type":"ContainerStarted","Data":"ad51c4638eaa0fbe5f281b449160adcf64f81521b95a232e1339015d086d0b03"} Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.630268 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-s8gk2" event={"ID":"cfd7f275-e7d1-4239-b55a-b0566664e6bf","Type":"ContainerStarted","Data":"a9edcf043fa22ffac6a71cf47ce3c34f5006afc8a8d04960d9becf4c31534799"} Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.646616 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-g5w92" podStartSLOduration=3.646602877 podStartE2EDuration="3.646602877s" podCreationTimestamp="2025-11-29 04:29:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:29:30.644771042 +0000 UTC m=+1097.709274546" watchObservedRunningTime="2025-11-29 04:29:30.646602877 +0000 UTC m=+1097.711106391" Nov 29 04:29:30 crc kubenswrapper[4631]: I1129 04:29:30.861518 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:30 crc kubenswrapper[4631]: E1129 04:29:30.861820 4631 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 29 04:29:30 crc kubenswrapper[4631]: E1129 04:29:30.861851 4631 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 29 04:29:30 crc kubenswrapper[4631]: E1129 04:29:30.861914 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift 
podName:874bb6b3-16cb-4d17-bf8b-6d3593d727d0 nodeName:}" failed. No retries permitted until 2025-11-29 04:29:32.861892342 +0000 UTC m=+1099.926395856 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift") pod "swift-storage-0" (UID: "874bb6b3-16cb-4d17-bf8b-6d3593d727d0") : configmap "swift-ring-files" not found Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.297842 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-76d2-account-create-update-8cjpz"] Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.311179 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.314666 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.326915 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-wqw7t"] Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.328014 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.370705 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-operator-scripts\") pod \"glance-76d2-account-create-update-8cjpz\" (UID: \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\") " pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.370790 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9cxm\" (UniqueName: \"kubernetes.io/projected/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-kube-api-access-w9cxm\") pod \"glance-76d2-account-create-update-8cjpz\" (UID: \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\") " pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.373683 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-76d2-account-create-update-8cjpz"] Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.382153 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-wqw7t"] Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.471930 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-operator-scripts\") pod \"glance-76d2-account-create-update-8cjpz\" (UID: \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\") " pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.471983 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c93fa44-2e1c-406b-aa4f-967436e33d1f-operator-scripts\") pod \"glance-db-create-wqw7t\" (UID: \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\") " pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.472043 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9cxm\" (UniqueName: 
\"kubernetes.io/projected/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-kube-api-access-w9cxm\") pod \"glance-76d2-account-create-update-8cjpz\" (UID: \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\") " pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.472107 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5xnq\" (UniqueName: \"kubernetes.io/projected/8c93fa44-2e1c-406b-aa4f-967436e33d1f-kube-api-access-v5xnq\") pod \"glance-db-create-wqw7t\" (UID: \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\") " pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.472904 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-operator-scripts\") pod \"glance-76d2-account-create-update-8cjpz\" (UID: \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\") " pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.501019 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9cxm\" (UniqueName: \"kubernetes.io/projected/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-kube-api-access-w9cxm\") pod \"glance-76d2-account-create-update-8cjpz\" (UID: \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\") " pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.573720 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5xnq\" (UniqueName: \"kubernetes.io/projected/8c93fa44-2e1c-406b-aa4f-967436e33d1f-kube-api-access-v5xnq\") pod \"glance-db-create-wqw7t\" (UID: \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\") " pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.574107 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c93fa44-2e1c-406b-aa4f-967436e33d1f-operator-scripts\") pod \"glance-db-create-wqw7t\" (UID: \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\") " pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.574853 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c93fa44-2e1c-406b-aa4f-967436e33d1f-operator-scripts\") pod \"glance-db-create-wqw7t\" (UID: \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\") " pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.605655 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5xnq\" (UniqueName: \"kubernetes.io/projected/8c93fa44-2e1c-406b-aa4f-967436e33d1f-kube-api-access-v5xnq\") pod \"glance-db-create-wqw7t\" (UID: \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\") " pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.639716 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-kl2kj" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.639758 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-kl2kj" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.658747 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-kl2kj" 
podStartSLOduration=16.58527079 podStartE2EDuration="51.658733669s" podCreationTimestamp="2025-11-29 04:28:40 +0000 UTC" firstStartedPulling="2025-11-29 04:28:51.998369959 +0000 UTC m=+1059.062873473" lastFinishedPulling="2025-11-29 04:29:27.071832848 +0000 UTC m=+1094.136336352" observedRunningTime="2025-11-29 04:29:31.65587784 +0000 UTC m=+1098.720381354" watchObservedRunningTime="2025-11-29 04:29:31.658733669 +0000 UTC m=+1098.723237183" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.659273 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:31 crc kubenswrapper[4631]: I1129 04:29:31.675227 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:32 crc kubenswrapper[4631]: I1129 04:29:32.217418 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-wqw7t"] Nov 29 04:29:32 crc kubenswrapper[4631]: W1129 04:29:32.230816 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c93fa44_2e1c_406b_aa4f_967436e33d1f.slice/crio-54ce5ca69a8d75398eba88a01fec495c0f8f002fa158d2296fc0a36c97553a08 WatchSource:0}: Error finding container 54ce5ca69a8d75398eba88a01fec495c0f8f002fa158d2296fc0a36c97553a08: Status 404 returned error can't find the container with id 54ce5ca69a8d75398eba88a01fec495c0f8f002fa158d2296fc0a36c97553a08 Nov 29 04:29:32 crc kubenswrapper[4631]: I1129 04:29:32.322411 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-76d2-account-create-update-8cjpz"] Nov 29 04:29:32 crc kubenswrapper[4631]: W1129 04:29:32.329458 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a5ebe9e_271d_4e18_88ee_cd5933fa1a38.slice/crio-4728844cfa571385c8b1b79cb63140963a394c6229a9202589b8e0ec7723c7ba WatchSource:0}: Error finding container 4728844cfa571385c8b1b79cb63140963a394c6229a9202589b8e0ec7723c7ba: Status 404 returned error can't find the container with id 4728844cfa571385c8b1b79cb63140963a394c6229a9202589b8e0ec7723c7ba Nov 29 04:29:32 crc kubenswrapper[4631]: I1129 04:29:32.648830 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-76d2-account-create-update-8cjpz" event={"ID":"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38","Type":"ContainerStarted","Data":"16979f2296e2f251032bf77412746d907fab31551ba350fd48330eaed4ea8e56"} Nov 29 04:29:32 crc kubenswrapper[4631]: I1129 04:29:32.649091 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-76d2-account-create-update-8cjpz" event={"ID":"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38","Type":"ContainerStarted","Data":"4728844cfa571385c8b1b79cb63140963a394c6229a9202589b8e0ec7723c7ba"} Nov 29 04:29:32 crc kubenswrapper[4631]: I1129 04:29:32.655971 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-wqw7t" event={"ID":"8c93fa44-2e1c-406b-aa4f-967436e33d1f","Type":"ContainerStarted","Data":"1ef80e39cf89ec44b4431dce81708dc24d232b7c235a92534c1632f9ec14aecb"} Nov 29 04:29:32 crc kubenswrapper[4631]: I1129 04:29:32.656028 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-wqw7t" event={"ID":"8c93fa44-2e1c-406b-aa4f-967436e33d1f","Type":"ContainerStarted","Data":"54ce5ca69a8d75398eba88a01fec495c0f8f002fa158d2296fc0a36c97553a08"} Nov 29 04:29:32 crc kubenswrapper[4631]: I1129 
04:29:32.669878 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-76d2-account-create-update-8cjpz" podStartSLOduration=1.669859687 podStartE2EDuration="1.669859687s" podCreationTimestamp="2025-11-29 04:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:29:32.662468157 +0000 UTC m=+1099.726971681" watchObservedRunningTime="2025-11-29 04:29:32.669859687 +0000 UTC m=+1099.734363201" Nov 29 04:29:32 crc kubenswrapper[4631]: I1129 04:29:32.689278 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-wqw7t" podStartSLOduration=1.689258928 podStartE2EDuration="1.689258928s" podCreationTimestamp="2025-11-29 04:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:29:32.686076931 +0000 UTC m=+1099.750580445" watchObservedRunningTime="2025-11-29 04:29:32.689258928 +0000 UTC m=+1099.753762432" Nov 29 04:29:32 crc kubenswrapper[4631]: I1129 04:29:32.897414 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:32 crc kubenswrapper[4631]: E1129 04:29:32.897657 4631 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 29 04:29:32 crc kubenswrapper[4631]: E1129 04:29:32.897676 4631 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 29 04:29:32 crc kubenswrapper[4631]: E1129 04:29:32.897721 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift podName:874bb6b3-16cb-4d17-bf8b-6d3593d727d0 nodeName:}" failed. No retries permitted until 2025-11-29 04:29:36.897708557 +0000 UTC m=+1103.962212071 (durationBeforeRetry 4s). 
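
Note the spacing of these retries for the same failing mount: durationBeforeRetry was 1s, then 2s, now 4s, with 8s to follow below. This is the kubelet's per-operation exponential backoff for failed volume operations; each nestedpendingoperations.go entry carries both the delay and the "No retries permitted until" deadline it implies. A distilled sketch of the policy follows: the initial delay matches the log, but the cap is an illustrative assumption, not the kubelet's actual constant.

package main

import (
	"fmt"
	"time"
)

// backoff doubles the delay on every consecutive failure of the same
// operation, as the durationBeforeRetry values in this log do
// (1s, 2s, 4s, 8s, ...).
type backoff struct{ delay time.Duration }

func (b *backoff) next() time.Duration {
	const limit = 2 * time.Minute // assumed cap, not kubelet's exact value
	if b.delay == 0 {
		b.delay = time.Second // first retry after 1s, as observed above
	} else {
		b.delay *= 2
	}
	if b.delay > limit {
		b.delay = limit
	}
	return b.delay
}

func main() {
	var b backoff
	for i := 0; i < 5; i++ {
		fmt.Print(b.next(), " ") // 1s 2s 4s 8s 16s
	}
	fmt.Println()
}
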
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift") pod "swift-storage-0" (UID: "874bb6b3-16cb-4d17-bf8b-6d3593d727d0") : configmap "swift-ring-files" not found Nov 29 04:29:33 crc kubenswrapper[4631]: I1129 04:29:33.663936 4631 generic.go:334] "Generic (PLEG): container finished" podID="8c93fa44-2e1c-406b-aa4f-967436e33d1f" containerID="1ef80e39cf89ec44b4431dce81708dc24d232b7c235a92534c1632f9ec14aecb" exitCode=0 Nov 29 04:29:33 crc kubenswrapper[4631]: I1129 04:29:33.663986 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-wqw7t" event={"ID":"8c93fa44-2e1c-406b-aa4f-967436e33d1f","Type":"ContainerDied","Data":"1ef80e39cf89ec44b4431dce81708dc24d232b7c235a92534c1632f9ec14aecb"} Nov 29 04:29:33 crc kubenswrapper[4631]: I1129 04:29:33.666829 4631 generic.go:334] "Generic (PLEG): container finished" podID="2a5ebe9e-271d-4e18-88ee-cd5933fa1a38" containerID="16979f2296e2f251032bf77412746d907fab31551ba350fd48330eaed4ea8e56" exitCode=0 Nov 29 04:29:33 crc kubenswrapper[4631]: I1129 04:29:33.666867 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-76d2-account-create-update-8cjpz" event={"ID":"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38","Type":"ContainerDied","Data":"16979f2296e2f251032bf77412746d907fab31551ba350fd48330eaed4ea8e56"} Nov 29 04:29:34 crc kubenswrapper[4631]: I1129 04:29:34.952823 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-nxmq7"] Nov 29 04:29:34 crc kubenswrapper[4631]: I1129 04:29:34.957744 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:34 crc kubenswrapper[4631]: I1129 04:29:34.974867 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-nxmq7"] Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.029349 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-875wm\" (UniqueName: \"kubernetes.io/projected/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-kube-api-access-875wm\") pod \"keystone-db-create-nxmq7\" (UID: \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\") " pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.029503 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-operator-scripts\") pod \"keystone-db-create-nxmq7\" (UID: \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\") " pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.063794 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-c306-account-create-update-wh7pg"] Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.064764 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.067465 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.074745 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c306-account-create-update-wh7pg"] Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.130850 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-operator-scripts\") pod \"keystone-db-create-nxmq7\" (UID: \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\") " pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.130904 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-875wm\" (UniqueName: \"kubernetes.io/projected/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-kube-api-access-875wm\") pod \"keystone-db-create-nxmq7\" (UID: \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\") " pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.130955 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0d28ea4-ab7d-4765-88a9-f889c935c418-operator-scripts\") pod \"keystone-c306-account-create-update-wh7pg\" (UID: \"e0d28ea4-ab7d-4765-88a9-f889c935c418\") " pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.130982 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swlnq\" (UniqueName: \"kubernetes.io/projected/e0d28ea4-ab7d-4765-88a9-f889c935c418-kube-api-access-swlnq\") pod \"keystone-c306-account-create-update-wh7pg\" (UID: \"e0d28ea4-ab7d-4765-88a9-f889c935c418\") " pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.131750 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-operator-scripts\") pod \"keystone-db-create-nxmq7\" (UID: \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\") " pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.147771 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-875wm\" (UniqueName: \"kubernetes.io/projected/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-kube-api-access-875wm\") pod \"keystone-db-create-nxmq7\" (UID: \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\") " pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.232184 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0d28ea4-ab7d-4765-88a9-f889c935c418-operator-scripts\") pod \"keystone-c306-account-create-update-wh7pg\" (UID: \"e0d28ea4-ab7d-4765-88a9-f889c935c418\") " pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.232219 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swlnq\" (UniqueName: \"kubernetes.io/projected/e0d28ea4-ab7d-4765-88a9-f889c935c418-kube-api-access-swlnq\") pod 
\"keystone-c306-account-create-update-wh7pg\" (UID: \"e0d28ea4-ab7d-4765-88a9-f889c935c418\") " pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.233402 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0d28ea4-ab7d-4765-88a9-f889c935c418-operator-scripts\") pod \"keystone-c306-account-create-update-wh7pg\" (UID: \"e0d28ea4-ab7d-4765-88a9-f889c935c418\") " pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.259169 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swlnq\" (UniqueName: \"kubernetes.io/projected/e0d28ea4-ab7d-4765-88a9-f889c935c418-kube-api-access-swlnq\") pod \"keystone-c306-account-create-update-wh7pg\" (UID: \"e0d28ea4-ab7d-4765-88a9-f889c935c418\") " pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.280659 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.291043 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-d2q8v"] Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.291955 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.311671 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-d2q8v"] Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.333224 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88af8b16-da55-45bc-b04a-38984d9f6f2d-operator-scripts\") pod \"placement-db-create-d2q8v\" (UID: \"88af8b16-da55-45bc-b04a-38984d9f6f2d\") " pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.333283 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fg8dj\" (UniqueName: \"kubernetes.io/projected/88af8b16-da55-45bc-b04a-38984d9f6f2d-kube-api-access-fg8dj\") pod \"placement-db-create-d2q8v\" (UID: \"88af8b16-da55-45bc-b04a-38984d9f6f2d\") " pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.392028 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.398201 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5ce7-account-create-update-bnj85"] Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.399130 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.401345 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.416748 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5ce7-account-create-update-bnj85"] Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.434197 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ht9m\" (UniqueName: \"kubernetes.io/projected/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-kube-api-access-8ht9m\") pod \"placement-5ce7-account-create-update-bnj85\" (UID: \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\") " pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.435986 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-operator-scripts\") pod \"placement-5ce7-account-create-update-bnj85\" (UID: \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\") " pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.436603 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88af8b16-da55-45bc-b04a-38984d9f6f2d-operator-scripts\") pod \"placement-db-create-d2q8v\" (UID: \"88af8b16-da55-45bc-b04a-38984d9f6f2d\") " pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.436656 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88af8b16-da55-45bc-b04a-38984d9f6f2d-operator-scripts\") pod \"placement-db-create-d2q8v\" (UID: \"88af8b16-da55-45bc-b04a-38984d9f6f2d\") " pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.436705 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fg8dj\" (UniqueName: \"kubernetes.io/projected/88af8b16-da55-45bc-b04a-38984d9f6f2d-kube-api-access-fg8dj\") pod \"placement-db-create-d2q8v\" (UID: \"88af8b16-da55-45bc-b04a-38984d9f6f2d\") " pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.458117 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fg8dj\" (UniqueName: \"kubernetes.io/projected/88af8b16-da55-45bc-b04a-38984d9f6f2d-kube-api-access-fg8dj\") pod \"placement-db-create-d2q8v\" (UID: \"88af8b16-da55-45bc-b04a-38984d9f6f2d\") " pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.537824 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-operator-scripts\") pod \"placement-5ce7-account-create-update-bnj85\" (UID: \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\") " pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.537927 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ht9m\" (UniqueName: 
\"kubernetes.io/projected/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-kube-api-access-8ht9m\") pod \"placement-5ce7-account-create-update-bnj85\" (UID: \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\") " pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.538475 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-operator-scripts\") pod \"placement-5ce7-account-create-update-bnj85\" (UID: \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\") " pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.552813 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ht9m\" (UniqueName: \"kubernetes.io/projected/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-kube-api-access-8ht9m\") pod \"placement-5ce7-account-create-update-bnj85\" (UID: \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\") " pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.605374 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.726746 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.757812 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.761887 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.842216 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c93fa44-2e1c-406b-aa4f-967436e33d1f-operator-scripts\") pod \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\" (UID: \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\") " Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.842281 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-operator-scripts\") pod \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\" (UID: \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\") " Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.842325 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5xnq\" (UniqueName: \"kubernetes.io/projected/8c93fa44-2e1c-406b-aa4f-967436e33d1f-kube-api-access-v5xnq\") pod \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\" (UID: \"8c93fa44-2e1c-406b-aa4f-967436e33d1f\") " Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.842436 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9cxm\" (UniqueName: \"kubernetes.io/projected/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-kube-api-access-w9cxm\") pod \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\" (UID: \"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38\") " Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.842932 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c93fa44-2e1c-406b-aa4f-967436e33d1f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8c93fa44-2e1c-406b-aa4f-967436e33d1f" (UID: "8c93fa44-2e1c-406b-aa4f-967436e33d1f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.842992 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2a5ebe9e-271d-4e18-88ee-cd5933fa1a38" (UID: "2a5ebe9e-271d-4e18-88ee-cd5933fa1a38"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.846915 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c93fa44-2e1c-406b-aa4f-967436e33d1f-kube-api-access-v5xnq" (OuterVolumeSpecName: "kube-api-access-v5xnq") pod "8c93fa44-2e1c-406b-aa4f-967436e33d1f" (UID: "8c93fa44-2e1c-406b-aa4f-967436e33d1f"). InnerVolumeSpecName "kube-api-access-v5xnq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.847222 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-kube-api-access-w9cxm" (OuterVolumeSpecName: "kube-api-access-w9cxm") pod "2a5ebe9e-271d-4e18-88ee-cd5933fa1a38" (UID: "2a5ebe9e-271d-4e18-88ee-cd5933fa1a38"). InnerVolumeSpecName "kube-api-access-w9cxm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.944779 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c93fa44-2e1c-406b-aa4f-967436e33d1f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.944803 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.944813 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5xnq\" (UniqueName: \"kubernetes.io/projected/8c93fa44-2e1c-406b-aa4f-967436e33d1f-kube-api-access-v5xnq\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:35 crc kubenswrapper[4631]: I1129 04:29:35.944826 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9cxm\" (UniqueName: \"kubernetes.io/projected/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38-kube-api-access-w9cxm\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:36 crc kubenswrapper[4631]: I1129 04:29:36.711892 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-wqw7t" event={"ID":"8c93fa44-2e1c-406b-aa4f-967436e33d1f","Type":"ContainerDied","Data":"54ce5ca69a8d75398eba88a01fec495c0f8f002fa158d2296fc0a36c97553a08"} Nov 29 04:29:36 crc kubenswrapper[4631]: I1129 04:29:36.712247 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54ce5ca69a8d75398eba88a01fec495c0f8f002fa158d2296fc0a36c97553a08" Nov 29 04:29:36 crc kubenswrapper[4631]: I1129 04:29:36.712303 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-wqw7t" Nov 29 04:29:36 crc kubenswrapper[4631]: I1129 04:29:36.740096 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-76d2-account-create-update-8cjpz" event={"ID":"2a5ebe9e-271d-4e18-88ee-cd5933fa1a38","Type":"ContainerDied","Data":"4728844cfa571385c8b1b79cb63140963a394c6229a9202589b8e0ec7723c7ba"} Nov 29 04:29:36 crc kubenswrapper[4631]: I1129 04:29:36.740131 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4728844cfa571385c8b1b79cb63140963a394c6229a9202589b8e0ec7723c7ba" Nov 29 04:29:36 crc kubenswrapper[4631]: I1129 04:29:36.740187 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-76d2-account-create-update-8cjpz" Nov 29 04:29:36 crc kubenswrapper[4631]: I1129 04:29:36.977688 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:36 crc kubenswrapper[4631]: E1129 04:29:36.978565 4631 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 29 04:29:36 crc kubenswrapper[4631]: E1129 04:29:36.978592 4631 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 29 04:29:36 crc kubenswrapper[4631]: E1129 04:29:36.978657 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift podName:874bb6b3-16cb-4d17-bf8b-6d3593d727d0 nodeName:}" failed. No retries permitted until 2025-11-29 04:29:44.978639955 +0000 UTC m=+1112.043143469 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift") pod "swift-storage-0" (UID: "874bb6b3-16cb-4d17-bf8b-6d3593d727d0") : configmap "swift-ring-files" not found Nov 29 04:29:37 crc kubenswrapper[4631]: I1129 04:29:37.021003 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-nxmq7"] Nov 29 04:29:37 crc kubenswrapper[4631]: W1129 04:29:37.031970 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ca8d74c_2b26_4c5a_9d68_4773da8f6ff4.slice/crio-b990739aa62c40af26d4c48b50e3982de2ab49415d7e1222c1fd4db2e08b355f WatchSource:0}: Error finding container b990739aa62c40af26d4c48b50e3982de2ab49415d7e1222c1fd4db2e08b355f: Status 404 returned error can't find the container with id b990739aa62c40af26d4c48b50e3982de2ab49415d7e1222c1fd4db2e08b355f Nov 29 04:29:37 crc kubenswrapper[4631]: I1129 04:29:37.157633 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5ce7-account-create-update-bnj85"] Nov 29 04:29:37 crc kubenswrapper[4631]: W1129 04:29:37.164215 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbcc88877_f1aa_4a40_b362_4b95ee7f4a72.slice/crio-6ccf40fbb2f506626e3354279dc19c2ae3fe9f49a0b7ee80906b65a31331c87f WatchSource:0}: Error finding container 6ccf40fbb2f506626e3354279dc19c2ae3fe9f49a0b7ee80906b65a31331c87f: Status 404 returned error can't find the container with id 6ccf40fbb2f506626e3354279dc19c2ae3fe9f49a0b7ee80906b65a31331c87f Nov 29 04:29:37 crc kubenswrapper[4631]: I1129 04:29:37.298648 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-d2q8v"] Nov 29 04:29:37 crc kubenswrapper[4631]: W1129 04:29:37.345769 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0d28ea4_ab7d_4765_88a9_f889c935c418.slice/crio-7ffda758efd25eedad7e0ab60f0c2531a3ff0a761036b79117ec3d7c31e23df2 WatchSource:0}: Error finding container 7ffda758efd25eedad7e0ab60f0c2531a3ff0a761036b79117ec3d7c31e23df2: Status 404 returned error can't find the container with id 
7ffda758efd25eedad7e0ab60f0c2531a3ff0a761036b79117ec3d7c31e23df2 Nov 29 04:29:37 crc kubenswrapper[4631]: I1129 04:29:37.347684 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c306-account-create-update-wh7pg"] Nov 29 04:29:37 crc kubenswrapper[4631]: I1129 04:29:37.751129 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-d2q8v" event={"ID":"88af8b16-da55-45bc-b04a-38984d9f6f2d","Type":"ContainerStarted","Data":"b4ae665061e95de6f31ec357b71a3c5bec7fc05ff0fbd7c97c7dc3b094b13491"} Nov 29 04:29:37 crc kubenswrapper[4631]: I1129 04:29:37.753916 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c306-account-create-update-wh7pg" event={"ID":"e0d28ea4-ab7d-4765-88a9-f889c935c418","Type":"ContainerStarted","Data":"7ffda758efd25eedad7e0ab60f0c2531a3ff0a761036b79117ec3d7c31e23df2"} Nov 29 04:29:37 crc kubenswrapper[4631]: I1129 04:29:37.756470 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ce7-account-create-update-bnj85" event={"ID":"bcc88877-f1aa-4a40-b362-4b95ee7f4a72","Type":"ContainerStarted","Data":"6ccf40fbb2f506626e3354279dc19c2ae3fe9f49a0b7ee80906b65a31331c87f"} Nov 29 04:29:37 crc kubenswrapper[4631]: I1129 04:29:37.758309 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nxmq7" event={"ID":"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4","Type":"ContainerStarted","Data":"b990739aa62c40af26d4c48b50e3982de2ab49415d7e1222c1fd4db2e08b355f"} Nov 29 04:29:37 crc kubenswrapper[4631]: I1129 04:29:37.850713 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.205593 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.274804 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f8659"] Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.275074 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" podUID="03f0a309-9927-471e-b4ff-caf759d1b050" containerName="dnsmasq-dns" containerID="cri-o://3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb" gracePeriod=10 Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.819110 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ce7-account-create-update-bnj85" event={"ID":"bcc88877-f1aa-4a40-b362-4b95ee7f4a72","Type":"ContainerStarted","Data":"83e083ddb0c739a58651a4a78580d188e9e361ea09cdecdee97cfdb17bd959f4"} Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.822118 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nxmq7" event={"ID":"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4","Type":"ContainerStarted","Data":"d7f7f7e88a0df25271213e207f849c6868f8f84acfe6f2ee4bf2b9a621991426"} Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.827587 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5bd78ee0-c12e-4d6b-a47d-3652c3150c8d","Type":"ContainerStarted","Data":"96fcb1066a3c7158cf06c0a2ded552a92ebd7b9df6e02d377cb0d7efb420583b"} Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.828143 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.829845 4631 generic.go:334] "Generic (PLEG): container finished" podID="03f0a309-9927-471e-b4ff-caf759d1b050" containerID="3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb" exitCode=0 Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.829897 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" event={"ID":"03f0a309-9927-471e-b4ff-caf759d1b050","Type":"ContainerDied","Data":"3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb"} Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.829917 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" event={"ID":"03f0a309-9927-471e-b4ff-caf759d1b050","Type":"ContainerDied","Data":"ae5e80ad8fe8f957c4048300f4743b1897b371f7ef2a32967c7e8308dbd237ae"} Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.829932 4631 scope.go:117] "RemoveContainer" containerID="3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.833360 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-d2q8v" event={"ID":"88af8b16-da55-45bc-b04a-38984d9f6f2d","Type":"ContainerStarted","Data":"5cab43d7561ccbc5d714b54ffc7519562ef28babace98df339823fa7c8de3ae8"} Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.841544 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5ce7-account-create-update-bnj85" podStartSLOduration=3.841528208 podStartE2EDuration="3.841528208s" podCreationTimestamp="2025-11-29 04:29:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:29:38.834835955 +0000 UTC m=+1105.899339469" watchObservedRunningTime="2025-11-29 04:29:38.841528208 +0000 UTC m=+1105.906031722" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.842390 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c306-account-create-update-wh7pg" event={"ID":"e0d28ea4-ab7d-4765-88a9-f889c935c418","Type":"ContainerStarted","Data":"9014280ee4e0991c7227bba0ca2606123046d66eaf690ba1c7d7e324f9d6e803"} Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.849616 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"c80339e5-63b2-451d-a7fb-25ef7a2fba6a","Type":"ContainerStarted","Data":"ea38119c170faa0bf83e0c6e06b903048c4f2df50e83ef5623badc52c8713d51"} Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.852522 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-s8gk2" event={"ID":"cfd7f275-e7d1-4239-b55a-b0566664e6bf","Type":"ContainerStarted","Data":"8140bedf184069d892448f54dfb24eab45904217061530bfc3ad5c53214c3d78"} Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.853782 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-d2q8v" podStartSLOduration=3.853772166 podStartE2EDuration="3.853772166s" podCreationTimestamp="2025-11-29 04:29:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:29:38.849364928 +0000 UTC m=+1105.913868452" watchObservedRunningTime="2025-11-29 04:29:38.853772166 +0000 UTC m=+1105.918275680" Nov 29 04:29:38 
crc kubenswrapper[4631]: I1129 04:29:38.858679 4631 scope.go:117] "RemoveContainer" containerID="5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.897381 4631 scope.go:117] "RemoveContainer" containerID="3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb" Nov 29 04:29:38 crc kubenswrapper[4631]: E1129 04:29:38.897746 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb\": container with ID starting with 3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb not found: ID does not exist" containerID="3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.897801 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb"} err="failed to get container status \"3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb\": rpc error: code = NotFound desc = could not find container \"3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb\": container with ID starting with 3f6a0c0827e221f8ecebb7202ee11851e9d6f78895adcd998e7ede7319c579bb not found: ID does not exist" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.897834 4631 scope.go:117] "RemoveContainer" containerID="5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b" Nov 29 04:29:38 crc kubenswrapper[4631]: E1129 04:29:38.898243 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b\": container with ID starting with 5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b not found: ID does not exist" containerID="5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.898268 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b"} err="failed to get container status \"5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b\": rpc error: code = NotFound desc = could not find container \"5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b\": container with ID starting with 5d6de35b9a4acd93b0eb7bccdf85fdc387e52345666051eb838bddc7747cb73b not found: ID does not exist" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.907850 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=11.417346773 podStartE2EDuration="55.907833771s" podCreationTimestamp="2025-11-29 04:28:43 +0000 UTC" firstStartedPulling="2025-11-29 04:28:52.124153478 +0000 UTC m=+1059.188656992" lastFinishedPulling="2025-11-29 04:29:36.614640436 +0000 UTC m=+1103.679143990" observedRunningTime="2025-11-29 04:29:38.907240357 +0000 UTC m=+1105.971743881" watchObservedRunningTime="2025-11-29 04:29:38.907833771 +0000 UTC m=+1105.972337285" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.922873 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-nxmq7" podStartSLOduration=4.922857937 podStartE2EDuration="4.922857937s" podCreationTimestamp="2025-11-29 04:29:34 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:29:38.921879203 +0000 UTC m=+1105.986382717" watchObservedRunningTime="2025-11-29 04:29:38.922857937 +0000 UTC m=+1105.987361451" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.945229 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-c306-account-create-update-wh7pg" podStartSLOduration=3.945209681 podStartE2EDuration="3.945209681s" podCreationTimestamp="2025-11-29 04:29:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:29:38.943201222 +0000 UTC m=+1106.007704736" watchObservedRunningTime="2025-11-29 04:29:38.945209681 +0000 UTC m=+1106.009713195" Nov 29 04:29:38 crc kubenswrapper[4631]: I1129 04:29:38.986177 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=11.953179734999999 podStartE2EDuration="55.986159428s" podCreationTimestamp="2025-11-29 04:28:43 +0000 UTC" firstStartedPulling="2025-11-29 04:28:52.621619465 +0000 UTC m=+1059.686122979" lastFinishedPulling="2025-11-29 04:29:36.654599138 +0000 UTC m=+1103.719102672" observedRunningTime="2025-11-29 04:29:38.980947831 +0000 UTC m=+1106.045451345" watchObservedRunningTime="2025-11-29 04:29:38.986159428 +0000 UTC m=+1106.050662942" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.004624 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-s8gk2" podStartSLOduration=3.7345814429999997 podStartE2EDuration="10.003315865s" podCreationTimestamp="2025-11-29 04:29:29 +0000 UTC" firstStartedPulling="2025-11-29 04:29:30.385501077 +0000 UTC m=+1097.450004591" lastFinishedPulling="2025-11-29 04:29:36.654235489 +0000 UTC m=+1103.718739013" observedRunningTime="2025-11-29 04:29:39.000720002 +0000 UTC m=+1106.065223526" watchObservedRunningTime="2025-11-29 04:29:39.003315865 +0000 UTC m=+1106.067819379" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.030557 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-sb\") pod \"03f0a309-9927-471e-b4ff-caf759d1b050\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.030668 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-826rj\" (UniqueName: \"kubernetes.io/projected/03f0a309-9927-471e-b4ff-caf759d1b050-kube-api-access-826rj\") pod \"03f0a309-9927-471e-b4ff-caf759d1b050\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.030772 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-config\") pod \"03f0a309-9927-471e-b4ff-caf759d1b050\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.030812 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-dns-svc\") pod \"03f0a309-9927-471e-b4ff-caf759d1b050\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 
04:29:39.030833 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-nb\") pod \"03f0a309-9927-471e-b4ff-caf759d1b050\" (UID: \"03f0a309-9927-471e-b4ff-caf759d1b050\") " Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.036212 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03f0a309-9927-471e-b4ff-caf759d1b050-kube-api-access-826rj" (OuterVolumeSpecName: "kube-api-access-826rj") pod "03f0a309-9927-471e-b4ff-caf759d1b050" (UID: "03f0a309-9927-471e-b4ff-caf759d1b050"). InnerVolumeSpecName "kube-api-access-826rj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.065934 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "03f0a309-9927-471e-b4ff-caf759d1b050" (UID: "03f0a309-9927-471e-b4ff-caf759d1b050"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.068511 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "03f0a309-9927-471e-b4ff-caf759d1b050" (UID: "03f0a309-9927-471e-b4ff-caf759d1b050"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.074822 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-config" (OuterVolumeSpecName: "config") pod "03f0a309-9927-471e-b4ff-caf759d1b050" (UID: "03f0a309-9927-471e-b4ff-caf759d1b050"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.080871 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "03f0a309-9927-471e-b4ff-caf759d1b050" (UID: "03f0a309-9927-471e-b4ff-caf759d1b050"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.132593 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-826rj\" (UniqueName: \"kubernetes.io/projected/03f0a309-9927-471e-b4ff-caf759d1b050-kube-api-access-826rj\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.132788 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.132879 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.132946 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.133009 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03f0a309-9927-471e-b4ff-caf759d1b050-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.394489 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 29 04:29:39 crc kubenswrapper[4631]: E1129 04:29:39.553473 4631 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88af8b16_da55_45bc_b04a_38984d9f6f2d.slice/crio-5cab43d7561ccbc5d714b54ffc7519562ef28babace98df339823fa7c8de3ae8.scope\": RecentStats: unable to find data in memory cache]" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.580247 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.862729 4631 generic.go:334] "Generic (PLEG): container finished" podID="e0d28ea4-ab7d-4765-88a9-f889c935c418" containerID="9014280ee4e0991c7227bba0ca2606123046d66eaf690ba1c7d7e324f9d6e803" exitCode=0 Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.862795 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c306-account-create-update-wh7pg" event={"ID":"e0d28ea4-ab7d-4765-88a9-f889c935c418","Type":"ContainerDied","Data":"9014280ee4e0991c7227bba0ca2606123046d66eaf690ba1c7d7e324f9d6e803"} Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.864872 4631 generic.go:334] "Generic (PLEG): container finished" podID="bcc88877-f1aa-4a40-b362-4b95ee7f4a72" containerID="83e083ddb0c739a58651a4a78580d188e9e361ea09cdecdee97cfdb17bd959f4" exitCode=0 Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.864939 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ce7-account-create-update-bnj85" event={"ID":"bcc88877-f1aa-4a40-b362-4b95ee7f4a72","Type":"ContainerDied","Data":"83e083ddb0c739a58651a4a78580d188e9e361ea09cdecdee97cfdb17bd959f4"} Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.866551 4631 generic.go:334] "Generic (PLEG): container finished" podID="3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4" containerID="d7f7f7e88a0df25271213e207f849c6868f8f84acfe6f2ee4bf2b9a621991426" exitCode=0 Nov 29 
04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.866645 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nxmq7" event={"ID":"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4","Type":"ContainerDied","Data":"d7f7f7e88a0df25271213e207f849c6868f8f84acfe6f2ee4bf2b9a621991426"} Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.867623 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-f8659" Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.869377 4631 generic.go:334] "Generic (PLEG): container finished" podID="88af8b16-da55-45bc-b04a-38984d9f6f2d" containerID="5cab43d7561ccbc5d714b54ffc7519562ef28babace98df339823fa7c8de3ae8" exitCode=0 Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.869462 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-d2q8v" event={"ID":"88af8b16-da55-45bc-b04a-38984d9f6f2d","Type":"ContainerDied","Data":"5cab43d7561ccbc5d714b54ffc7519562ef28babace98df339823fa7c8de3ae8"} Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.914482 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f8659"] Nov 29 04:29:39 crc kubenswrapper[4631]: I1129 04:29:39.923480 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f8659"] Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.237865 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03f0a309-9927-471e-b4ff-caf759d1b050" path="/var/lib/kubelet/pods/03f0a309-9927-471e-b4ff-caf759d1b050/volumes" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.267215 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.375126 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fg8dj\" (UniqueName: \"kubernetes.io/projected/88af8b16-da55-45bc-b04a-38984d9f6f2d-kube-api-access-fg8dj\") pod \"88af8b16-da55-45bc-b04a-38984d9f6f2d\" (UID: \"88af8b16-da55-45bc-b04a-38984d9f6f2d\") " Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.375230 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88af8b16-da55-45bc-b04a-38984d9f6f2d-operator-scripts\") pod \"88af8b16-da55-45bc-b04a-38984d9f6f2d\" (UID: \"88af8b16-da55-45bc-b04a-38984d9f6f2d\") " Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.376453 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88af8b16-da55-45bc-b04a-38984d9f6f2d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "88af8b16-da55-45bc-b04a-38984d9f6f2d" (UID: "88af8b16-da55-45bc-b04a-38984d9f6f2d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.381837 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88af8b16-da55-45bc-b04a-38984d9f6f2d-kube-api-access-fg8dj" (OuterVolumeSpecName: "kube-api-access-fg8dj") pod "88af8b16-da55-45bc-b04a-38984d9f6f2d" (UID: "88af8b16-da55-45bc-b04a-38984d9f6f2d"). InnerVolumeSpecName "kube-api-access-fg8dj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.394933 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.422483 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.430116 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.450268 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.477106 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fg8dj\" (UniqueName: \"kubernetes.io/projected/88af8b16-da55-45bc-b04a-38984d9f6f2d-kube-api-access-fg8dj\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.477144 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88af8b16-da55-45bc-b04a-38984d9f6f2d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.487638 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580142 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-57lmq"] Nov 29 04:29:41 crc kubenswrapper[4631]: E1129 04:29:41.580501 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcc88877-f1aa-4a40-b362-4b95ee7f4a72" containerName="mariadb-account-create-update" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580518 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcc88877-f1aa-4a40-b362-4b95ee7f4a72" containerName="mariadb-account-create-update" Nov 29 04:29:41 crc kubenswrapper[4631]: E1129 04:29:41.580531 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03f0a309-9927-471e-b4ff-caf759d1b050" containerName="dnsmasq-dns" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580538 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="03f0a309-9927-471e-b4ff-caf759d1b050" containerName="dnsmasq-dns" Nov 29 04:29:41 crc kubenswrapper[4631]: E1129 04:29:41.580555 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a5ebe9e-271d-4e18-88ee-cd5933fa1a38" containerName="mariadb-account-create-update" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580560 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a5ebe9e-271d-4e18-88ee-cd5933fa1a38" containerName="mariadb-account-create-update" Nov 29 04:29:41 crc kubenswrapper[4631]: E1129 04:29:41.580575 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88af8b16-da55-45bc-b04a-38984d9f6f2d" containerName="mariadb-database-create" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580580 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="88af8b16-da55-45bc-b04a-38984d9f6f2d" containerName="mariadb-database-create" Nov 29 04:29:41 crc kubenswrapper[4631]: E1129 04:29:41.580592 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4" 
containerName="mariadb-database-create" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580597 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4" containerName="mariadb-database-create" Nov 29 04:29:41 crc kubenswrapper[4631]: E1129 04:29:41.580606 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d28ea4-ab7d-4765-88a9-f889c935c418" containerName="mariadb-account-create-update" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580614 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d28ea4-ab7d-4765-88a9-f889c935c418" containerName="mariadb-account-create-update" Nov 29 04:29:41 crc kubenswrapper[4631]: E1129 04:29:41.580624 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03f0a309-9927-471e-b4ff-caf759d1b050" containerName="init" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580631 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="03f0a309-9927-471e-b4ff-caf759d1b050" containerName="init" Nov 29 04:29:41 crc kubenswrapper[4631]: E1129 04:29:41.580642 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c93fa44-2e1c-406b-aa4f-967436e33d1f" containerName="mariadb-database-create" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580648 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c93fa44-2e1c-406b-aa4f-967436e33d1f" containerName="mariadb-database-create" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580804 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0d28ea4-ab7d-4765-88a9-f889c935c418" containerName="mariadb-account-create-update" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580813 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4" containerName="mariadb-database-create" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580823 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a5ebe9e-271d-4e18-88ee-cd5933fa1a38" containerName="mariadb-account-create-update" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580833 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="88af8b16-da55-45bc-b04a-38984d9f6f2d" containerName="mariadb-database-create" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580845 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcc88877-f1aa-4a40-b362-4b95ee7f4a72" containerName="mariadb-account-create-update" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580854 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c93fa44-2e1c-406b-aa4f-967436e33d1f" containerName="mariadb-database-create" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.580862 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="03f0a309-9927-471e-b4ff-caf759d1b050" containerName="dnsmasq-dns" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.583091 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ht9m\" (UniqueName: \"kubernetes.io/projected/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-kube-api-access-8ht9m\") pod \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\" (UID: \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\") " Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.583219 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/e0d28ea4-ab7d-4765-88a9-f889c935c418-operator-scripts\") pod \"e0d28ea4-ab7d-4765-88a9-f889c935c418\" (UID: \"e0d28ea4-ab7d-4765-88a9-f889c935c418\") " Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.583273 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-operator-scripts\") pod \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\" (UID: \"bcc88877-f1aa-4a40-b362-4b95ee7f4a72\") " Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.583311 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swlnq\" (UniqueName: \"kubernetes.io/projected/e0d28ea4-ab7d-4765-88a9-f889c935c418-kube-api-access-swlnq\") pod \"e0d28ea4-ab7d-4765-88a9-f889c935c418\" (UID: \"e0d28ea4-ab7d-4765-88a9-f889c935c418\") " Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.583374 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-operator-scripts\") pod \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\" (UID: \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\") " Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.583414 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-875wm\" (UniqueName: \"kubernetes.io/projected/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-kube-api-access-875wm\") pod \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\" (UID: \"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4\") " Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.584043 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bcc88877-f1aa-4a40-b362-4b95ee7f4a72" (UID: "bcc88877-f1aa-4a40-b362-4b95ee7f4a72"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.584760 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0d28ea4-ab7d-4765-88a9-f889c935c418-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e0d28ea4-ab7d-4765-88a9-f889c935c418" (UID: "e0d28ea4-ab7d-4765-88a9-f889c935c418"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.584936 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.585269 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4" (UID: "3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.585368 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.588920 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qmjvc" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.589068 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.590134 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0d28ea4-ab7d-4765-88a9-f889c935c418-kube-api-access-swlnq" (OuterVolumeSpecName: "kube-api-access-swlnq") pod "e0d28ea4-ab7d-4765-88a9-f889c935c418" (UID: "e0d28ea4-ab7d-4765-88a9-f889c935c418"). InnerVolumeSpecName "kube-api-access-swlnq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.590162 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-kube-api-access-875wm" (OuterVolumeSpecName: "kube-api-access-875wm") pod "3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4" (UID: "3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4"). InnerVolumeSpecName "kube-api-access-875wm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.597201 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-kube-api-access-8ht9m" (OuterVolumeSpecName: "kube-api-access-8ht9m") pod "bcc88877-f1aa-4a40-b362-4b95ee7f4a72" (UID: "bcc88877-f1aa-4a40-b362-4b95ee7f4a72"). InnerVolumeSpecName "kube-api-access-8ht9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.597813 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-57lmq"] Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.637429 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.685772 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-db-sync-config-data\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.686026 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-config-data\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.686125 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzd87\" (UniqueName: \"kubernetes.io/projected/c6cdce96-7bd4-45c6-9597-6196ceee67ef-kube-api-access-zzd87\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.686276 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-combined-ca-bundle\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.686532 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ht9m\" (UniqueName: \"kubernetes.io/projected/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-kube-api-access-8ht9m\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.686619 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0d28ea4-ab7d-4765-88a9-f889c935c418-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.686676 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bcc88877-f1aa-4a40-b362-4b95ee7f4a72-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.686729 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swlnq\" (UniqueName: \"kubernetes.io/projected/e0d28ea4-ab7d-4765-88a9-f889c935c418-kube-api-access-swlnq\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.686782 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.686859 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-875wm\" (UniqueName: \"kubernetes.io/projected/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4-kube-api-access-875wm\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.787786 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-db-sync-config-data\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.788033 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-config-data\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.788061 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzd87\" (UniqueName: \"kubernetes.io/projected/c6cdce96-7bd4-45c6-9597-6196ceee67ef-kube-api-access-zzd87\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.788094 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-combined-ca-bundle\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.790741 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-db-sync-config-data\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.791186 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-config-data\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.791783 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-combined-ca-bundle\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.803588 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzd87\" (UniqueName: \"kubernetes.io/projected/c6cdce96-7bd4-45c6-9597-6196ceee67ef-kube-api-access-zzd87\") pod \"glance-db-sync-57lmq\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.883906 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-nxmq7" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.883900 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-nxmq7" event={"ID":"3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4","Type":"ContainerDied","Data":"b990739aa62c40af26d4c48b50e3982de2ab49415d7e1222c1fd4db2e08b355f"} Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.884380 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b990739aa62c40af26d4c48b50e3982de2ab49415d7e1222c1fd4db2e08b355f" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.885439 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-d2q8v" event={"ID":"88af8b16-da55-45bc-b04a-38984d9f6f2d","Type":"ContainerDied","Data":"b4ae665061e95de6f31ec357b71a3c5bec7fc05ff0fbd7c97c7dc3b094b13491"} Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.885541 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4ae665061e95de6f31ec357b71a3c5bec7fc05ff0fbd7c97c7dc3b094b13491" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.885500 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-d2q8v" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.887054 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c306-account-create-update-wh7pg" event={"ID":"e0d28ea4-ab7d-4765-88a9-f889c935c418","Type":"ContainerDied","Data":"7ffda758efd25eedad7e0ab60f0c2531a3ff0a761036b79117ec3d7c31e23df2"} Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.887095 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ffda758efd25eedad7e0ab60f0c2531a3ff0a761036b79117ec3d7c31e23df2" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.887066 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-c306-account-create-update-wh7pg" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.888912 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5ce7-account-create-update-bnj85" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.892514 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5ce7-account-create-update-bnj85" event={"ID":"bcc88877-f1aa-4a40-b362-4b95ee7f4a72","Type":"ContainerDied","Data":"6ccf40fbb2f506626e3354279dc19c2ae3fe9f49a0b7ee80906b65a31331c87f"} Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.892565 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ccf40fbb2f506626e3354279dc19c2ae3fe9f49a0b7ee80906b65a31331c87f" Nov 29 04:29:41 crc kubenswrapper[4631]: I1129 04:29:41.909873 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-57lmq" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.174456 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.453878 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-57lmq"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.528466 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.595434 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-m57j4"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.596427 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.607399 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-m57j4"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.622909 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-28fd-account-create-update-wj22h"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.623949 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.629877 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.649976 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-28fd-account-create-update-wj22h"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.700813 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-tlnb9"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.701775 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.716020 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c103a4b-4b90-43c6-838f-8a71fb909eaa-operator-scripts\") pod \"cinder-db-create-m57j4\" (UID: \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\") " pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.716069 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftwbc\" (UniqueName: \"kubernetes.io/projected/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-kube-api-access-ftwbc\") pod \"cinder-28fd-account-create-update-wj22h\" (UID: \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\") " pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.716108 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-operator-scripts\") pod \"cinder-28fd-account-create-update-wj22h\" (UID: \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\") " pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.716181 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6lv4\" (UniqueName: \"kubernetes.io/projected/9c103a4b-4b90-43c6-838f-8a71fb909eaa-kube-api-access-l6lv4\") pod \"cinder-db-create-m57j4\" (UID: \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\") " pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.720499 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-tlnb9"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.795286 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-1934-account-create-update-twc59"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.796436 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.798374 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.817408 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6lv4\" (UniqueName: \"kubernetes.io/projected/9c103a4b-4b90-43c6-838f-8a71fb909eaa-kube-api-access-l6lv4\") pod \"cinder-db-create-m57j4\" (UID: \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\") " pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.817450 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77fgw\" (UniqueName: \"kubernetes.io/projected/342b6f95-d7bc-491b-b9bf-a218b7825807-kube-api-access-77fgw\") pod \"barbican-db-create-tlnb9\" (UID: \"342b6f95-d7bc-491b-b9bf-a218b7825807\") " pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.817481 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/342b6f95-d7bc-491b-b9bf-a218b7825807-operator-scripts\") pod \"barbican-db-create-tlnb9\" (UID: \"342b6f95-d7bc-491b-b9bf-a218b7825807\") " pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.817552 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c103a4b-4b90-43c6-838f-8a71fb909eaa-operator-scripts\") pod \"cinder-db-create-m57j4\" (UID: \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\") " pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.817582 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftwbc\" (UniqueName: \"kubernetes.io/projected/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-kube-api-access-ftwbc\") pod \"cinder-28fd-account-create-update-wj22h\" (UID: \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\") " pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.817617 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-operator-scripts\") pod \"cinder-28fd-account-create-update-wj22h\" (UID: \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\") " pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.818406 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-operator-scripts\") pod \"cinder-28fd-account-create-update-wj22h\" (UID: \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\") " pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.819228 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c103a4b-4b90-43c6-838f-8a71fb909eaa-operator-scripts\") pod \"cinder-db-create-m57j4\" (UID: \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\") " pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.858081 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ftwbc\" (UniqueName: \"kubernetes.io/projected/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-kube-api-access-ftwbc\") pod \"cinder-28fd-account-create-update-wj22h\" (UID: \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\") " pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.863695 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6lv4\" (UniqueName: \"kubernetes.io/projected/9c103a4b-4b90-43c6-838f-8a71fb909eaa-kube-api-access-l6lv4\") pod \"cinder-db-create-m57j4\" (UID: \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\") " pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.865981 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1934-account-create-update-twc59"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.904204 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-57lmq" event={"ID":"c6cdce96-7bd4-45c6-9597-6196ceee67ef","Type":"ContainerStarted","Data":"f89561fe83ccabd9075f3ae746a2839314537ae1ba3e3f0cd7e9a31f8861f76a"} Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.925425 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28xqg\" (UniqueName: \"kubernetes.io/projected/8734e05c-9806-444b-b0c7-31d795cc4e8a-kube-api-access-28xqg\") pod \"barbican-1934-account-create-update-twc59\" (UID: \"8734e05c-9806-444b-b0c7-31d795cc4e8a\") " pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.925501 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8734e05c-9806-444b-b0c7-31d795cc4e8a-operator-scripts\") pod \"barbican-1934-account-create-update-twc59\" (UID: \"8734e05c-9806-444b-b0c7-31d795cc4e8a\") " pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.925566 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77fgw\" (UniqueName: \"kubernetes.io/projected/342b6f95-d7bc-491b-b9bf-a218b7825807-kube-api-access-77fgw\") pod \"barbican-db-create-tlnb9\" (UID: \"342b6f95-d7bc-491b-b9bf-a218b7825807\") " pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.925592 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/342b6f95-d7bc-491b-b9bf-a218b7825807-operator-scripts\") pod \"barbican-db-create-tlnb9\" (UID: \"342b6f95-d7bc-491b-b9bf-a218b7825807\") " pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.926226 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/342b6f95-d7bc-491b-b9bf-a218b7825807-operator-scripts\") pod \"barbican-db-create-tlnb9\" (UID: \"342b6f95-d7bc-491b-b9bf-a218b7825807\") " pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.929006 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.934064 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-f5pvv"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.946365 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.946764 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.980419 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-f5pvv"] Nov 29 04:29:42 crc kubenswrapper[4631]: I1129 04:29:42.985375 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77fgw\" (UniqueName: \"kubernetes.io/projected/342b6f95-d7bc-491b-b9bf-a218b7825807-kube-api-access-77fgw\") pod \"barbican-db-create-tlnb9\" (UID: \"342b6f95-d7bc-491b-b9bf-a218b7825807\") " pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.014571 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.027092 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/627bd4eb-a3ae-4a48-bccf-e65734ff396e-operator-scripts\") pod \"neutron-db-create-f5pvv\" (UID: \"627bd4eb-a3ae-4a48-bccf-e65734ff396e\") " pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.027192 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw4nd\" (UniqueName: \"kubernetes.io/projected/627bd4eb-a3ae-4a48-bccf-e65734ff396e-kube-api-access-cw4nd\") pod \"neutron-db-create-f5pvv\" (UID: \"627bd4eb-a3ae-4a48-bccf-e65734ff396e\") " pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.027264 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7566-account-create-update-wrcb6"] Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.027271 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28xqg\" (UniqueName: \"kubernetes.io/projected/8734e05c-9806-444b-b0c7-31d795cc4e8a-kube-api-access-28xqg\") pod \"barbican-1934-account-create-update-twc59\" (UID: \"8734e05c-9806-444b-b0c7-31d795cc4e8a\") " pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.027672 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8734e05c-9806-444b-b0c7-31d795cc4e8a-operator-scripts\") pod \"barbican-1934-account-create-update-twc59\" (UID: \"8734e05c-9806-444b-b0c7-31d795cc4e8a\") " pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.028258 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8734e05c-9806-444b-b0c7-31d795cc4e8a-operator-scripts\") pod \"barbican-1934-account-create-update-twc59\" (UID: \"8734e05c-9806-444b-b0c7-31d795cc4e8a\") " 
pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.028841 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.037945 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7566-account-create-update-wrcb6"] Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.045987 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.051867 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28xqg\" (UniqueName: \"kubernetes.io/projected/8734e05c-9806-444b-b0c7-31d795cc4e8a-kube-api-access-28xqg\") pod \"barbican-1934-account-create-update-twc59\" (UID: \"8734e05c-9806-444b-b0c7-31d795cc4e8a\") " pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.116436 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.147021 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw4nd\" (UniqueName: \"kubernetes.io/projected/627bd4eb-a3ae-4a48-bccf-e65734ff396e-kube-api-access-cw4nd\") pod \"neutron-db-create-f5pvv\" (UID: \"627bd4eb-a3ae-4a48-bccf-e65734ff396e\") " pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.147372 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e71ff62-6937-453f-9add-da82958c3990-operator-scripts\") pod \"neutron-7566-account-create-update-wrcb6\" (UID: \"2e71ff62-6937-453f-9add-da82958c3990\") " pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.147428 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/627bd4eb-a3ae-4a48-bccf-e65734ff396e-operator-scripts\") pod \"neutron-db-create-f5pvv\" (UID: \"627bd4eb-a3ae-4a48-bccf-e65734ff396e\") " pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.147478 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-446rx\" (UniqueName: \"kubernetes.io/projected/2e71ff62-6937-453f-9add-da82958c3990-kube-api-access-446rx\") pod \"neutron-7566-account-create-update-wrcb6\" (UID: \"2e71ff62-6937-453f-9add-da82958c3990\") " pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.148615 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/627bd4eb-a3ae-4a48-bccf-e65734ff396e-operator-scripts\") pod \"neutron-db-create-f5pvv\" (UID: \"627bd4eb-a3ae-4a48-bccf-e65734ff396e\") " pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.181384 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw4nd\" (UniqueName: \"kubernetes.io/projected/627bd4eb-a3ae-4a48-bccf-e65734ff396e-kube-api-access-cw4nd\") pod \"neutron-db-create-f5pvv\" (UID: 
\"627bd4eb-a3ae-4a48-bccf-e65734ff396e\") " pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.248342 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-446rx\" (UniqueName: \"kubernetes.io/projected/2e71ff62-6937-453f-9add-da82958c3990-kube-api-access-446rx\") pod \"neutron-7566-account-create-update-wrcb6\" (UID: \"2e71ff62-6937-453f-9add-da82958c3990\") " pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.248427 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e71ff62-6937-453f-9add-da82958c3990-operator-scripts\") pod \"neutron-7566-account-create-update-wrcb6\" (UID: \"2e71ff62-6937-453f-9add-da82958c3990\") " pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.249023 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e71ff62-6937-453f-9add-da82958c3990-operator-scripts\") pod \"neutron-7566-account-create-update-wrcb6\" (UID: \"2e71ff62-6937-453f-9add-da82958c3990\") " pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.281555 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-446rx\" (UniqueName: \"kubernetes.io/projected/2e71ff62-6937-453f-9add-da82958c3990-kube-api-access-446rx\") pod \"neutron-7566-account-create-update-wrcb6\" (UID: \"2e71ff62-6937-453f-9add-da82958c3990\") " pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.300724 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.365708 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.547756 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-m57j4"] Nov 29 04:29:43 crc kubenswrapper[4631]: W1129 04:29:43.548028 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c103a4b_4b90_43c6_838f_8a71fb909eaa.slice/crio-a0d8aad281dd2e6ff86f5ee76423ca9f10d568c4acea53b36fac5ae605ce5e0e WatchSource:0}: Error finding container a0d8aad281dd2e6ff86f5ee76423ca9f10d568c4acea53b36fac5ae605ce5e0e: Status 404 returned error can't find the container with id a0d8aad281dd2e6ff86f5ee76423ca9f10d568c4acea53b36fac5ae605ce5e0e Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.606507 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-28fd-account-create-update-wj22h"] Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.770196 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1934-account-create-update-twc59"] Nov 29 04:29:43 crc kubenswrapper[4631]: W1129 04:29:43.775140 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8734e05c_9806_444b_b0c7_31d795cc4e8a.slice/crio-c1757ddb5ce5208bdfcf92ef29d7e3eef930c83f48370995b8909f281b82fe13 WatchSource:0}: Error finding container c1757ddb5ce5208bdfcf92ef29d7e3eef930c83f48370995b8909f281b82fe13: Status 404 returned error can't find the container with id c1757ddb5ce5208bdfcf92ef29d7e3eef930c83f48370995b8909f281b82fe13 Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.909155 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-tlnb9"] Nov 29 04:29:43 crc kubenswrapper[4631]: W1129 04:29:43.917954 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod342b6f95_d7bc_491b_b9bf_a218b7825807.slice/crio-b44fef0385f025316abfc328998ed1a23e9bf4f3f75563e02eb8ba52c6cae0cf WatchSource:0}: Error finding container b44fef0385f025316abfc328998ed1a23e9bf4f3f75563e02eb8ba52c6cae0cf: Status 404 returned error can't find the container with id b44fef0385f025316abfc328998ed1a23e9bf4f3f75563e02eb8ba52c6cae0cf Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.925722 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1934-account-create-update-twc59" event={"ID":"8734e05c-9806-444b-b0c7-31d795cc4e8a","Type":"ContainerStarted","Data":"c1757ddb5ce5208bdfcf92ef29d7e3eef930c83f48370995b8909f281b82fe13"} Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.933702 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-m57j4" event={"ID":"9c103a4b-4b90-43c6-838f-8a71fb909eaa","Type":"ContainerStarted","Data":"a0d8aad281dd2e6ff86f5ee76423ca9f10d568c4acea53b36fac5ae605ce5e0e"} Nov 29 04:29:43 crc kubenswrapper[4631]: I1129 04:29:43.942717 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-28fd-account-create-update-wj22h" event={"ID":"0940a4d9-6460-4e3e-91c2-b84ac32e33c4","Type":"ContainerStarted","Data":"324d1b7db1a7f95b780f1706fa4a90bfa85fefb05c95d3114175eef15a2ab0f8"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.045902 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7566-account-create-update-wrcb6"] Nov 29 04:29:44 crc kubenswrapper[4631]: 
I1129 04:29:44.068286 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-f5pvv"] Nov 29 04:29:44 crc kubenswrapper[4631]: W1129 04:29:44.081725 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod627bd4eb_a3ae_4a48_bccf_e65734ff396e.slice/crio-3bf58623638ab3bb8bec1c4d4023176a10f820f4ae82a225af43c510ec6e4279 WatchSource:0}: Error finding container 3bf58623638ab3bb8bec1c4d4023176a10f820f4ae82a225af43c510ec6e4279: Status 404 returned error can't find the container with id 3bf58623638ab3bb8bec1c4d4023176a10f820f4ae82a225af43c510ec6e4279 Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.442895 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.645648 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.933382 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.934622 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.936323 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-9wntf" Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.936562 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.940350 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.940486 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.954075 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.961140 4631 generic.go:334] "Generic (PLEG): container finished" podID="0940a4d9-6460-4e3e-91c2-b84ac32e33c4" containerID="ba7dca68b6cabbbd19beb62ab3c17bf131be9ab64450ef6fdc7e27b2f680fab7" exitCode=0 Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.961240 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-28fd-account-create-update-wj22h" event={"ID":"0940a4d9-6460-4e3e-91c2-b84ac32e33c4","Type":"ContainerDied","Data":"ba7dca68b6cabbbd19beb62ab3c17bf131be9ab64450ef6fdc7e27b2f680fab7"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.964005 4631 generic.go:334] "Generic (PLEG): container finished" podID="342b6f95-d7bc-491b-b9bf-a218b7825807" containerID="2b79860d20564eb7d9b1ba2ae7231e36863263c53cc01e7d849c390bf32446e5" exitCode=0 Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.964052 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tlnb9" event={"ID":"342b6f95-d7bc-491b-b9bf-a218b7825807","Type":"ContainerDied","Data":"2b79860d20564eb7d9b1ba2ae7231e36863263c53cc01e7d849c390bf32446e5"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.964069 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tlnb9" 
event={"ID":"342b6f95-d7bc-491b-b9bf-a218b7825807","Type":"ContainerStarted","Data":"b44fef0385f025316abfc328998ed1a23e9bf4f3f75563e02eb8ba52c6cae0cf"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.965385 4631 generic.go:334] "Generic (PLEG): container finished" podID="8734e05c-9806-444b-b0c7-31d795cc4e8a" containerID="2ad1bbb050894d1934ec8cb54b9d09052e5eb3c932a8cf7cd20e0d87eb85d6ec" exitCode=0 Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.965446 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1934-account-create-update-twc59" event={"ID":"8734e05c-9806-444b-b0c7-31d795cc4e8a","Type":"ContainerDied","Data":"2ad1bbb050894d1934ec8cb54b9d09052e5eb3c932a8cf7cd20e0d87eb85d6ec"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.968741 4631 generic.go:334] "Generic (PLEG): container finished" podID="9c103a4b-4b90-43c6-838f-8a71fb909eaa" containerID="e0b00c9c253ac2fe51f730864292f14c196fbd78e3468780a9281d86fd3a80af" exitCode=0 Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.968845 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-m57j4" event={"ID":"9c103a4b-4b90-43c6-838f-8a71fb909eaa","Type":"ContainerDied","Data":"e0b00c9c253ac2fe51f730864292f14c196fbd78e3468780a9281d86fd3a80af"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.970544 4631 generic.go:334] "Generic (PLEG): container finished" podID="627bd4eb-a3ae-4a48-bccf-e65734ff396e" containerID="984a38512a137d418dc5b16af7d191cc94ae7a9877c631b7d21c274c4a0ed842" exitCode=0 Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.970586 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-f5pvv" event={"ID":"627bd4eb-a3ae-4a48-bccf-e65734ff396e","Type":"ContainerDied","Data":"984a38512a137d418dc5b16af7d191cc94ae7a9877c631b7d21c274c4a0ed842"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.970602 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-f5pvv" event={"ID":"627bd4eb-a3ae-4a48-bccf-e65734ff396e","Type":"ContainerStarted","Data":"3bf58623638ab3bb8bec1c4d4023176a10f820f4ae82a225af43c510ec6e4279"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.975429 4631 generic.go:334] "Generic (PLEG): container finished" podID="2e71ff62-6937-453f-9add-da82958c3990" containerID="3fe56cb79c27c74bba1fb316dd45785f46b530ec775b7ba9925847fdd6291e11" exitCode=0 Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.975466 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7566-account-create-update-wrcb6" event={"ID":"2e71ff62-6937-453f-9add-da82958c3990","Type":"ContainerDied","Data":"3fe56cb79c27c74bba1fb316dd45785f46b530ec775b7ba9925847fdd6291e11"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.975483 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7566-account-create-update-wrcb6" event={"ID":"2e71ff62-6937-453f-9add-da82958c3990","Type":"ContainerStarted","Data":"040cba44aed352c29621e514b27f4f5444d785536a471629667af55bab3d21ec"} Nov 29 04:29:44 crc kubenswrapper[4631]: I1129 04:29:44.995756 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0" Nov 29 04:29:44 crc kubenswrapper[4631]: E1129 04:29:44.995921 4631 projected.go:288] Couldn't get configMap 
openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 29 04:29:44 crc kubenswrapper[4631]: E1129 04:29:44.995935 4631 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 29 04:29:44 crc kubenswrapper[4631]: E1129 04:29:44.995981 4631 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift podName:874bb6b3-16cb-4d17-bf8b-6d3593d727d0 nodeName:}" failed. No retries permitted until 2025-11-29 04:30:00.995965093 +0000 UTC m=+1128.060468607 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift") pod "swift-storage-0" (UID: "874bb6b3-16cb-4d17-bf8b-6d3593d727d0") : configmap "swift-ring-files" not found Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.097495 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7108e5ce-c50c-44e2-971f-9a22a4370b52-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.097868 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7108e5ce-c50c-44e2-971f-9a22a4370b52-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.097905 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7108e5ce-c50c-44e2-971f-9a22a4370b52-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.097927 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7108e5ce-c50c-44e2-971f-9a22a4370b52-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.097946 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqmh6\" (UniqueName: \"kubernetes.io/projected/7108e5ce-c50c-44e2-971f-9a22a4370b52-kube-api-access-hqmh6\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.099469 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7108e5ce-c50c-44e2-971f-9a22a4370b52-scripts\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.099495 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7108e5ce-c50c-44e2-971f-9a22a4370b52-config\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 
04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.201125 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7108e5ce-c50c-44e2-971f-9a22a4370b52-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.201168 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7108e5ce-c50c-44e2-971f-9a22a4370b52-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.201187 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqmh6\" (UniqueName: \"kubernetes.io/projected/7108e5ce-c50c-44e2-971f-9a22a4370b52-kube-api-access-hqmh6\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.201221 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7108e5ce-c50c-44e2-971f-9a22a4370b52-scripts\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.201241 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7108e5ce-c50c-44e2-971f-9a22a4370b52-config\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.201283 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7108e5ce-c50c-44e2-971f-9a22a4370b52-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.201370 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7108e5ce-c50c-44e2-971f-9a22a4370b52-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.202612 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7108e5ce-c50c-44e2-971f-9a22a4370b52-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.218295 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7108e5ce-c50c-44e2-971f-9a22a4370b52-scripts\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.218527 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7108e5ce-c50c-44e2-971f-9a22a4370b52-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc 
kubenswrapper[4631]: I1129 04:29:45.218557 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7108e5ce-c50c-44e2-971f-9a22a4370b52-config\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.219298 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7108e5ce-c50c-44e2-971f-9a22a4370b52-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.226273 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqmh6\" (UniqueName: \"kubernetes.io/projected/7108e5ce-c50c-44e2-971f-9a22a4370b52-kube-api-access-hqmh6\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.228212 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7108e5ce-c50c-44e2-971f-9a22a4370b52-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7108e5ce-c50c-44e2-971f-9a22a4370b52\") " pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.258642 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.588231 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.689866 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-w9m8l"] Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.691042 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.699733 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-w9m8l"] Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.699932 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.700110 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.700269 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-zjswg" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.706818 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.822055 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-config-data\") pod \"keystone-db-sync-w9m8l\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") " pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.822147 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdzwc\" (UniqueName: \"kubernetes.io/projected/425113ab-54e0-4372-bdd1-587e2dc743d2-kube-api-access-tdzwc\") pod \"keystone-db-sync-w9m8l\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") " pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.822192 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-combined-ca-bundle\") pod \"keystone-db-sync-w9m8l\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") " pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.923493 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdzwc\" (UniqueName: \"kubernetes.io/projected/425113ab-54e0-4372-bdd1-587e2dc743d2-kube-api-access-tdzwc\") pod \"keystone-db-sync-w9m8l\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") " pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.923556 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-combined-ca-bundle\") pod \"keystone-db-sync-w9m8l\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") " pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.923624 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-config-data\") pod \"keystone-db-sync-w9m8l\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") " pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.929212 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-config-data\") pod \"keystone-db-sync-w9m8l\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") " 
pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.930616 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-combined-ca-bundle\") pod \"keystone-db-sync-w9m8l\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") " pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.949661 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdzwc\" (UniqueName: \"kubernetes.io/projected/425113ab-54e0-4372-bdd1-587e2dc743d2-kube-api-access-tdzwc\") pod \"keystone-db-sync-w9m8l\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") " pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:45 crc kubenswrapper[4631]: I1129 04:29:45.987095 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7108e5ce-c50c-44e2-971f-9a22a4370b52","Type":"ContainerStarted","Data":"61a7d2429c60cfecc30046454a24c985e58d755cfcff8774f0b85319374a2af6"} Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.011020 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-w9m8l" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.445308 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.449489 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e71ff62-6937-453f-9add-da82958c3990-operator-scripts\") pod \"2e71ff62-6937-453f-9add-da82958c3990\" (UID: \"2e71ff62-6937-453f-9add-da82958c3990\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.450288 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e71ff62-6937-453f-9add-da82958c3990-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2e71ff62-6937-453f-9add-da82958c3990" (UID: "2e71ff62-6937-453f-9add-da82958c3990"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.557978 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-446rx\" (UniqueName: \"kubernetes.io/projected/2e71ff62-6937-453f-9add-da82958c3990-kube-api-access-446rx\") pod \"2e71ff62-6937-453f-9add-da82958c3990\" (UID: \"2e71ff62-6937-453f-9add-da82958c3990\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.558315 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e71ff62-6937-453f-9add-da82958c3990-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.568900 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e71ff62-6937-453f-9add-da82958c3990-kube-api-access-446rx" (OuterVolumeSpecName: "kube-api-access-446rx") pod "2e71ff62-6937-453f-9add-da82958c3990" (UID: "2e71ff62-6937-453f-9add-da82958c3990"). InnerVolumeSpecName "kube-api-access-446rx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.612894 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.615486 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.628604 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.635947 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.643945 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663367 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6lv4\" (UniqueName: \"kubernetes.io/projected/9c103a4b-4b90-43c6-838f-8a71fb909eaa-kube-api-access-l6lv4\") pod \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\" (UID: \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663422 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28xqg\" (UniqueName: \"kubernetes.io/projected/8734e05c-9806-444b-b0c7-31d795cc4e8a-kube-api-access-28xqg\") pod \"8734e05c-9806-444b-b0c7-31d795cc4e8a\" (UID: \"8734e05c-9806-444b-b0c7-31d795cc4e8a\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663455 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/342b6f95-d7bc-491b-b9bf-a218b7825807-operator-scripts\") pod \"342b6f95-d7bc-491b-b9bf-a218b7825807\" (UID: \"342b6f95-d7bc-491b-b9bf-a218b7825807\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663485 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftwbc\" (UniqueName: \"kubernetes.io/projected/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-kube-api-access-ftwbc\") pod \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\" (UID: \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663518 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77fgw\" (UniqueName: \"kubernetes.io/projected/342b6f95-d7bc-491b-b9bf-a218b7825807-kube-api-access-77fgw\") pod \"342b6f95-d7bc-491b-b9bf-a218b7825807\" (UID: \"342b6f95-d7bc-491b-b9bf-a218b7825807\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663564 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cw4nd\" (UniqueName: \"kubernetes.io/projected/627bd4eb-a3ae-4a48-bccf-e65734ff396e-kube-api-access-cw4nd\") pod \"627bd4eb-a3ae-4a48-bccf-e65734ff396e\" (UID: \"627bd4eb-a3ae-4a48-bccf-e65734ff396e\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663584 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/627bd4eb-a3ae-4a48-bccf-e65734ff396e-operator-scripts\") pod \"627bd4eb-a3ae-4a48-bccf-e65734ff396e\" (UID: \"627bd4eb-a3ae-4a48-bccf-e65734ff396e\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663601 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c103a4b-4b90-43c6-838f-8a71fb909eaa-operator-scripts\") pod \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\" (UID: \"9c103a4b-4b90-43c6-838f-8a71fb909eaa\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663624 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8734e05c-9806-444b-b0c7-31d795cc4e8a-operator-scripts\") pod \"8734e05c-9806-444b-b0c7-31d795cc4e8a\" (UID: \"8734e05c-9806-444b-b0c7-31d795cc4e8a\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663646 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-operator-scripts\") pod \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\" (UID: \"0940a4d9-6460-4e3e-91c2-b84ac32e33c4\") " Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.663881 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-446rx\" (UniqueName: \"kubernetes.io/projected/2e71ff62-6937-453f-9add-da82958c3990-kube-api-access-446rx\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.668303 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0940a4d9-6460-4e3e-91c2-b84ac32e33c4" (UID: "0940a4d9-6460-4e3e-91c2-b84ac32e33c4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.671149 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/342b6f95-d7bc-491b-b9bf-a218b7825807-kube-api-access-77fgw" (OuterVolumeSpecName: "kube-api-access-77fgw") pod "342b6f95-d7bc-491b-b9bf-a218b7825807" (UID: "342b6f95-d7bc-491b-b9bf-a218b7825807"). InnerVolumeSpecName "kube-api-access-77fgw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.671426 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/627bd4eb-a3ae-4a48-bccf-e65734ff396e-kube-api-access-cw4nd" (OuterVolumeSpecName: "kube-api-access-cw4nd") pod "627bd4eb-a3ae-4a48-bccf-e65734ff396e" (UID: "627bd4eb-a3ae-4a48-bccf-e65734ff396e"). InnerVolumeSpecName "kube-api-access-cw4nd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.672195 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8734e05c-9806-444b-b0c7-31d795cc4e8a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8734e05c-9806-444b-b0c7-31d795cc4e8a" (UID: "8734e05c-9806-444b-b0c7-31d795cc4e8a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.672390 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/627bd4eb-a3ae-4a48-bccf-e65734ff396e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "627bd4eb-a3ae-4a48-bccf-e65734ff396e" (UID: "627bd4eb-a3ae-4a48-bccf-e65734ff396e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.673075 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/342b6f95-d7bc-491b-b9bf-a218b7825807-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "342b6f95-d7bc-491b-b9bf-a218b7825807" (UID: "342b6f95-d7bc-491b-b9bf-a218b7825807"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.677271 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c103a4b-4b90-43c6-838f-8a71fb909eaa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9c103a4b-4b90-43c6-838f-8a71fb909eaa" (UID: "9c103a4b-4b90-43c6-838f-8a71fb909eaa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.679810 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-kube-api-access-ftwbc" (OuterVolumeSpecName: "kube-api-access-ftwbc") pod "0940a4d9-6460-4e3e-91c2-b84ac32e33c4" (UID: "0940a4d9-6460-4e3e-91c2-b84ac32e33c4"). InnerVolumeSpecName "kube-api-access-ftwbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.679905 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8734e05c-9806-444b-b0c7-31d795cc4e8a-kube-api-access-28xqg" (OuterVolumeSpecName: "kube-api-access-28xqg") pod "8734e05c-9806-444b-b0c7-31d795cc4e8a" (UID: "8734e05c-9806-444b-b0c7-31d795cc4e8a"). InnerVolumeSpecName "kube-api-access-28xqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.679953 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c103a4b-4b90-43c6-838f-8a71fb909eaa-kube-api-access-l6lv4" (OuterVolumeSpecName: "kube-api-access-l6lv4") pod "9c103a4b-4b90-43c6-838f-8a71fb909eaa" (UID: "9c103a4b-4b90-43c6-838f-8a71fb909eaa"). InnerVolumeSpecName "kube-api-access-l6lv4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765674 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77fgw\" (UniqueName: \"kubernetes.io/projected/342b6f95-d7bc-491b-b9bf-a218b7825807-kube-api-access-77fgw\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765733 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cw4nd\" (UniqueName: \"kubernetes.io/projected/627bd4eb-a3ae-4a48-bccf-e65734ff396e-kube-api-access-cw4nd\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765744 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/627bd4eb-a3ae-4a48-bccf-e65734ff396e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765752 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c103a4b-4b90-43c6-838f-8a71fb909eaa-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765762 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8734e05c-9806-444b-b0c7-31d795cc4e8a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765771 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765778 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6lv4\" (UniqueName: \"kubernetes.io/projected/9c103a4b-4b90-43c6-838f-8a71fb909eaa-kube-api-access-l6lv4\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765788 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28xqg\" (UniqueName: \"kubernetes.io/projected/8734e05c-9806-444b-b0c7-31d795cc4e8a-kube-api-access-28xqg\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765796 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/342b6f95-d7bc-491b-b9bf-a218b7825807-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.765805 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftwbc\" (UniqueName: \"kubernetes.io/projected/0940a4d9-6460-4e3e-91c2-b84ac32e33c4-kube-api-access-ftwbc\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:46 crc kubenswrapper[4631]: I1129 04:29:46.820066 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-w9m8l"] Nov 29 04:29:46 crc kubenswrapper[4631]: W1129 04:29:46.834785 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod425113ab_54e0_4372_bdd1_587e2dc743d2.slice/crio-403bc32a3bf6f47e205990052f9794b31bb9198e2fc2c489c7b731828a349c4a WatchSource:0}: Error finding container 403bc32a3bf6f47e205990052f9794b31bb9198e2fc2c489c7b731828a349c4a: Status 404 returned error can't find the container with id 403bc32a3bf6f47e205990052f9794b31bb9198e2fc2c489c7b731828a349c4a Nov 29 04:29:46 crc 
kubenswrapper[4631]: I1129 04:29:46.998637 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-w9m8l" event={"ID":"425113ab-54e0-4372-bdd1-587e2dc743d2","Type":"ContainerStarted","Data":"403bc32a3bf6f47e205990052f9794b31bb9198e2fc2c489c7b731828a349c4a"} Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.000969 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-28fd-account-create-update-wj22h" event={"ID":"0940a4d9-6460-4e3e-91c2-b84ac32e33c4","Type":"ContainerDied","Data":"324d1b7db1a7f95b780f1706fa4a90bfa85fefb05c95d3114175eef15a2ab0f8"} Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.000994 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="324d1b7db1a7f95b780f1706fa4a90bfa85fefb05c95d3114175eef15a2ab0f8" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.001112 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-28fd-account-create-update-wj22h" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.008789 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-tlnb9" event={"ID":"342b6f95-d7bc-491b-b9bf-a218b7825807","Type":"ContainerDied","Data":"b44fef0385f025316abfc328998ed1a23e9bf4f3f75563e02eb8ba52c6cae0cf"} Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.008822 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b44fef0385f025316abfc328998ed1a23e9bf4f3f75563e02eb8ba52c6cae0cf" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.008869 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-tlnb9" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.015767 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1934-account-create-update-twc59" event={"ID":"8734e05c-9806-444b-b0c7-31d795cc4e8a","Type":"ContainerDied","Data":"c1757ddb5ce5208bdfcf92ef29d7e3eef930c83f48370995b8909f281b82fe13"} Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.015807 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1757ddb5ce5208bdfcf92ef29d7e3eef930c83f48370995b8909f281b82fe13" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.015861 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1934-account-create-update-twc59" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.022296 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-m57j4" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.022307 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-m57j4" event={"ID":"9c103a4b-4b90-43c6-838f-8a71fb909eaa","Type":"ContainerDied","Data":"a0d8aad281dd2e6ff86f5ee76423ca9f10d568c4acea53b36fac5ae605ce5e0e"} Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.022353 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0d8aad281dd2e6ff86f5ee76423ca9f10d568c4acea53b36fac5ae605ce5e0e" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.030915 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-f5pvv" event={"ID":"627bd4eb-a3ae-4a48-bccf-e65734ff396e","Type":"ContainerDied","Data":"3bf58623638ab3bb8bec1c4d4023176a10f820f4ae82a225af43c510ec6e4279"} Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.030945 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bf58623638ab3bb8bec1c4d4023176a10f820f4ae82a225af43c510ec6e4279" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.030992 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-f5pvv" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.033202 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7566-account-create-update-wrcb6" event={"ID":"2e71ff62-6937-453f-9add-da82958c3990","Type":"ContainerDied","Data":"040cba44aed352c29621e514b27f4f5444d785536a471629667af55bab3d21ec"} Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.033231 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="040cba44aed352c29621e514b27f4f5444d785536a471629667af55bab3d21ec" Nov 29 04:29:47 crc kubenswrapper[4631]: I1129 04:29:47.033292 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7566-account-create-update-wrcb6" Nov 29 04:29:48 crc kubenswrapper[4631]: I1129 04:29:48.047982 4631 generic.go:334] "Generic (PLEG): container finished" podID="cfd7f275-e7d1-4239-b55a-b0566664e6bf" containerID="8140bedf184069d892448f54dfb24eab45904217061530bfc3ad5c53214c3d78" exitCode=0 Nov 29 04:29:48 crc kubenswrapper[4631]: I1129 04:29:48.048076 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-s8gk2" event={"ID":"cfd7f275-e7d1-4239-b55a-b0566664e6bf","Type":"ContainerDied","Data":"8140bedf184069d892448f54dfb24eab45904217061530bfc3ad5c53214c3d78"} Nov 29 04:29:48 crc kubenswrapper[4631]: I1129 04:29:48.052143 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7108e5ce-c50c-44e2-971f-9a22a4370b52","Type":"ContainerStarted","Data":"90a68133219b20590c6472d50da5b4524ccdb18ac4f88368fd93c98637ef52e8"} Nov 29 04:29:48 crc kubenswrapper[4631]: I1129 04:29:48.052173 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7108e5ce-c50c-44e2-971f-9a22a4370b52","Type":"ContainerStarted","Data":"22f9ad3bd0c5065659cfa1425227f1565cee99e683685f2db5c9366815f35f85"} Nov 29 04:29:48 crc kubenswrapper[4631]: I1129 04:29:48.052315 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 29 04:29:48 crc kubenswrapper[4631]: I1129 04:29:48.087713 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.3718981230000002 podStartE2EDuration="4.087693033s" podCreationTimestamp="2025-11-29 04:29:44 +0000 UTC" firstStartedPulling="2025-11-29 04:29:45.618072205 +0000 UTC m=+1112.682575719" lastFinishedPulling="2025-11-29 04:29:47.333867115 +0000 UTC m=+1114.398370629" observedRunningTime="2025-11-29 04:29:48.079623797 +0000 UTC m=+1115.144127321" watchObservedRunningTime="2025-11-29 04:29:48.087693033 +0000 UTC m=+1115.152196547" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.051843 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.116782 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-s8gk2" event={"ID":"cfd7f275-e7d1-4239-b55a-b0566664e6bf","Type":"ContainerDied","Data":"a9edcf043fa22ffac6a71cf47ce3c34f5006afc8a8d04960d9becf4c31534799"} Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.116845 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-s8gk2" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.116859 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9edcf043fa22ffac6a71cf47ce3c34f5006afc8a8d04960d9becf4c31534799" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.201965 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-swiftconf\") pod \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.202078 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-dispersionconf\") pod \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.202108 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-combined-ca-bundle\") pod \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.202255 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cfd7f275-e7d1-4239-b55a-b0566664e6bf-etc-swift\") pod \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.202293 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlwr7\" (UniqueName: \"kubernetes.io/projected/cfd7f275-e7d1-4239-b55a-b0566664e6bf-kube-api-access-jlwr7\") pod \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.202316 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-scripts\") pod \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.202345 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-ring-data-devices\") pod \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\" (UID: \"cfd7f275-e7d1-4239-b55a-b0566664e6bf\") " Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.203438 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "cfd7f275-e7d1-4239-b55a-b0566664e6bf" (UID: "cfd7f275-e7d1-4239-b55a-b0566664e6bf"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.204401 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfd7f275-e7d1-4239-b55a-b0566664e6bf-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "cfd7f275-e7d1-4239-b55a-b0566664e6bf" (UID: "cfd7f275-e7d1-4239-b55a-b0566664e6bf"). 
InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.206968 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfd7f275-e7d1-4239-b55a-b0566664e6bf-kube-api-access-jlwr7" (OuterVolumeSpecName: "kube-api-access-jlwr7") pod "cfd7f275-e7d1-4239-b55a-b0566664e6bf" (UID: "cfd7f275-e7d1-4239-b55a-b0566664e6bf"). InnerVolumeSpecName "kube-api-access-jlwr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.212570 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "cfd7f275-e7d1-4239-b55a-b0566664e6bf" (UID: "cfd7f275-e7d1-4239-b55a-b0566664e6bf"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.226950 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-scripts" (OuterVolumeSpecName: "scripts") pod "cfd7f275-e7d1-4239-b55a-b0566664e6bf" (UID: "cfd7f275-e7d1-4239-b55a-b0566664e6bf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.246773 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cfd7f275-e7d1-4239-b55a-b0566664e6bf" (UID: "cfd7f275-e7d1-4239-b55a-b0566664e6bf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.260223 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "cfd7f275-e7d1-4239-b55a-b0566664e6bf" (UID: "cfd7f275-e7d1-4239-b55a-b0566664e6bf"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.304687 4631 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.304716 4631 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.304729 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfd7f275-e7d1-4239-b55a-b0566664e6bf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.304754 4631 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cfd7f275-e7d1-4239-b55a-b0566664e6bf-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.304764 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlwr7\" (UniqueName: \"kubernetes.io/projected/cfd7f275-e7d1-4239-b55a-b0566664e6bf-kube-api-access-jlwr7\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.304773 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:29:54 crc kubenswrapper[4631]: I1129 04:29:54.304781 4631 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cfd7f275-e7d1-4239-b55a-b0566664e6bf-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.167735 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"] Nov 29 04:30:00 crc kubenswrapper[4631]: E1129 04:30:00.168959 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="627bd4eb-a3ae-4a48-bccf-e65734ff396e" containerName="mariadb-database-create" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.168977 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="627bd4eb-a3ae-4a48-bccf-e65734ff396e" containerName="mariadb-database-create" Nov 29 04:30:00 crc kubenswrapper[4631]: E1129 04:30:00.168989 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e71ff62-6937-453f-9add-da82958c3990" containerName="mariadb-account-create-update" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.168998 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e71ff62-6937-453f-9add-da82958c3990" containerName="mariadb-account-create-update" Nov 29 04:30:00 crc kubenswrapper[4631]: E1129 04:30:00.169018 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="342b6f95-d7bc-491b-b9bf-a218b7825807" containerName="mariadb-database-create" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169027 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="342b6f95-d7bc-491b-b9bf-a218b7825807" containerName="mariadb-database-create" Nov 29 04:30:00 crc kubenswrapper[4631]: E1129 04:30:00.169038 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c103a4b-4b90-43c6-838f-8a71fb909eaa" 
containerName="mariadb-database-create" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169046 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c103a4b-4b90-43c6-838f-8a71fb909eaa" containerName="mariadb-database-create" Nov 29 04:30:00 crc kubenswrapper[4631]: E1129 04:30:00.169059 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8734e05c-9806-444b-b0c7-31d795cc4e8a" containerName="mariadb-account-create-update" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169066 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8734e05c-9806-444b-b0c7-31d795cc4e8a" containerName="mariadb-account-create-update" Nov 29 04:30:00 crc kubenswrapper[4631]: E1129 04:30:00.169078 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0940a4d9-6460-4e3e-91c2-b84ac32e33c4" containerName="mariadb-account-create-update" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169086 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="0940a4d9-6460-4e3e-91c2-b84ac32e33c4" containerName="mariadb-account-create-update" Nov 29 04:30:00 crc kubenswrapper[4631]: E1129 04:30:00.169104 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfd7f275-e7d1-4239-b55a-b0566664e6bf" containerName="swift-ring-rebalance" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169112 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfd7f275-e7d1-4239-b55a-b0566664e6bf" containerName="swift-ring-rebalance" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169302 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8734e05c-9806-444b-b0c7-31d795cc4e8a" containerName="mariadb-account-create-update" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169312 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="342b6f95-d7bc-491b-b9bf-a218b7825807" containerName="mariadb-database-create" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169352 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c103a4b-4b90-43c6-838f-8a71fb909eaa" containerName="mariadb-database-create" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169372 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e71ff62-6937-453f-9add-da82958c3990" containerName="mariadb-account-create-update" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169382 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfd7f275-e7d1-4239-b55a-b0566664e6bf" containerName="swift-ring-rebalance" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169396 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="0940a4d9-6460-4e3e-91c2-b84ac32e33c4" containerName="mariadb-account-create-update" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.169404 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="627bd4eb-a3ae-4a48-bccf-e65734ff396e" containerName="mariadb-database-create" Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.170134 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.174284 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.174393 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.179824 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"]
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.201828 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9335343-399a-4ce5-ae0a-927f764b6d04-secret-volume\") pod \"collect-profiles-29406510-2b84k\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.201941 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9335343-399a-4ce5-ae0a-927f764b6d04-config-volume\") pod \"collect-profiles-29406510-2b84k\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.201991 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g87z7\" (UniqueName: \"kubernetes.io/projected/b9335343-399a-4ce5-ae0a-927f764b6d04-kube-api-access-g87z7\") pod \"collect-profiles-29406510-2b84k\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.303593 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9335343-399a-4ce5-ae0a-927f764b6d04-config-volume\") pod \"collect-profiles-29406510-2b84k\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.303670 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g87z7\" (UniqueName: \"kubernetes.io/projected/b9335343-399a-4ce5-ae0a-927f764b6d04-kube-api-access-g87z7\") pod \"collect-profiles-29406510-2b84k\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.303772 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9335343-399a-4ce5-ae0a-927f764b6d04-secret-volume\") pod \"collect-profiles-29406510-2b84k\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.305883 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9335343-399a-4ce5-ae0a-927f764b6d04-config-volume\") pod \"collect-profiles-29406510-2b84k\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.310078 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9335343-399a-4ce5-ae0a-927f764b6d04-secret-volume\") pod \"collect-profiles-29406510-2b84k\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.323901 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g87z7\" (UniqueName: \"kubernetes.io/projected/b9335343-399a-4ce5-ae0a-927f764b6d04-kube-api-access-g87z7\") pod \"collect-profiles-29406510-2b84k\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.351066 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.498840 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:00 crc kubenswrapper[4631]: I1129 04:30:00.976674 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-fc5cp" podUID="9dac72cc-94dd-4863-92c6-99296142fafb" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 04:30:00 crc kubenswrapper[4631]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 04:30:00 crc kubenswrapper[4631]: >
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.016651 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.019816 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.022001 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-kl2kj"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.025833 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/874bb6b3-16cb-4d17-bf8b-6d3593d727d0-etc-swift\") pod \"swift-storage-0\" (UID: \"874bb6b3-16cb-4d17-bf8b-6d3593d727d0\") " pod="openstack/swift-storage-0"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.175404 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.261137 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-fc5cp-config-v9fsh"]
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.266058 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.272867 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.279464 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fc5cp-config-v9fsh"]
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.423096 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-scripts\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.423140 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-additional-scripts\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.423177 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfwdh\" (UniqueName: \"kubernetes.io/projected/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-kube-api-access-vfwdh\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.423236 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run-ovn\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.423258 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.423304 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-log-ovn\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525070 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-log-ovn\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525130 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-scripts\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525149 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-additional-scripts\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525184 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfwdh\" (UniqueName: \"kubernetes.io/projected/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-kube-api-access-vfwdh\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525244 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run-ovn\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525262 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525434 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-log-ovn\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525475 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run-ovn\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525531 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.525934 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-additional-scripts\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.527597 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-scripts\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.541856 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfwdh\" (UniqueName: \"kubernetes.io/projected/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-kube-api-access-vfwdh\") pod \"ovn-controller-fc5cp-config-v9fsh\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") " pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:01 crc kubenswrapper[4631]: I1129 04:30:01.598489 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:05 crc kubenswrapper[4631]: E1129 04:30:05.104236 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified"
Nov 29 04:30:05 crc kubenswrapper[4631]: E1129 04:30:05.104859 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zzd87,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-57lmq_openstack(c6cdce96-7bd4-45c6-9597-6196ceee67ef): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 04:30:05 crc kubenswrapper[4631]: E1129 04:30:05.109106 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-57lmq" podUID="c6cdce96-7bd4-45c6-9597-6196ceee67ef"
Nov 29 04:30:05 crc kubenswrapper[4631]: E1129 04:30:05.237531 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-57lmq" podUID="c6cdce96-7bd4-45c6-9597-6196ceee67ef"
Nov 29 04:30:05 crc kubenswrapper[4631]: I1129 04:30:05.722953 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Nov 29 04:30:05 crc kubenswrapper[4631]: W1129 04:30:05.730873 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod874bb6b3_16cb_4d17_bf8b_6d3593d727d0.slice/crio-d0e58ab5371c97af24cda6a90a270484bb5b2bd51aa44ef4e9503a14cde20a3e WatchSource:0}: Error finding container d0e58ab5371c97af24cda6a90a270484bb5b2bd51aa44ef4e9503a14cde20a3e: Status 404 returned error can't find the container with id d0e58ab5371c97af24cda6a90a270484bb5b2bd51aa44ef4e9503a14cde20a3e
Nov 29 04:30:05 crc kubenswrapper[4631]: I1129 04:30:05.749893 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"]
Nov 29 04:30:05 crc kubenswrapper[4631]: W1129 04:30:05.751869 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9335343_399a_4ce5_ae0a_927f764b6d04.slice/crio-039d14e98ff22061af5d882354706e102c53245e7674e3526cce35f2db91666f WatchSource:0}: Error finding container 039d14e98ff22061af5d882354706e102c53245e7674e3526cce35f2db91666f: Status 404 returned error can't find the container with id 039d14e98ff22061af5d882354706e102c53245e7674e3526cce35f2db91666f
Nov 29 04:30:05 crc kubenswrapper[4631]: I1129 04:30:05.788793 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fc5cp-config-v9fsh"]
Nov 29 04:30:05 crc kubenswrapper[4631]: W1129 04:30:05.797216 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78a0d803_a7fb_487d_9d59_bb0aa13a5d2c.slice/crio-b8b43c6a874da9d1bf9d3d6a212850ed23a8febb2f0098f8a346150dc53a8038 WatchSource:0}: Error finding container b8b43c6a874da9d1bf9d3d6a212850ed23a8febb2f0098f8a346150dc53a8038: Status 404 returned error can't find the container with id b8b43c6a874da9d1bf9d3d6a212850ed23a8febb2f0098f8a346150dc53a8038
Nov 29 04:30:06 crc kubenswrapper[4631]: I1129 04:30:06.170814 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-fc5cp" podUID="9dac72cc-94dd-4863-92c6-99296142fafb" containerName="ovn-controller" probeResult="failure" output=<
Nov 29 04:30:06 crc kubenswrapper[4631]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 29 04:30:06 crc kubenswrapper[4631]: >
Nov 29 04:30:06 crc kubenswrapper[4631]: I1129 04:30:06.242196 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k" event={"ID":"b9335343-399a-4ce5-ae0a-927f764b6d04","Type":"ContainerStarted","Data":"66c0429d8cff4c7754aa9bdff6b721c69598820e8acd549f8e5cf1b408a901f5"}
Nov 29 04:30:06 crc kubenswrapper[4631]: I1129 04:30:06.242245 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k" event={"ID":"b9335343-399a-4ce5-ae0a-927f764b6d04","Type":"ContainerStarted","Data":"039d14e98ff22061af5d882354706e102c53245e7674e3526cce35f2db91666f"}
Nov 29 04:30:06 crc kubenswrapper[4631]: I1129 04:30:06.244361 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-w9m8l" event={"ID":"425113ab-54e0-4372-bdd1-587e2dc743d2","Type":"ContainerStarted","Data":"ba3b9807761eda10e3dfe76eed243b5c29abedbdc9d4902d9c1b0da9b6415c78"}
Nov 29 04:30:06 crc kubenswrapper[4631]: I1129 04:30:06.245580 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"d0e58ab5371c97af24cda6a90a270484bb5b2bd51aa44ef4e9503a14cde20a3e"}
Nov 29 04:30:06 crc kubenswrapper[4631]: I1129 04:30:06.247276 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fc5cp-config-v9fsh" event={"ID":"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c","Type":"ContainerStarted","Data":"07f60739dd49e34c3e697d6f139a2ed988c4c05a9fb4115b577dbb0ba02e551c"}
Nov 29 04:30:06 crc kubenswrapper[4631]: I1129 04:30:06.247300 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fc5cp-config-v9fsh" event={"ID":"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c","Type":"ContainerStarted","Data":"b8b43c6a874da9d1bf9d3d6a212850ed23a8febb2f0098f8a346150dc53a8038"}
Nov 29 04:30:06 crc kubenswrapper[4631]: I1129 04:30:06.256408 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k" podStartSLOduration=6.256319238 podStartE2EDuration="6.256319238s" podCreationTimestamp="2025-11-29 04:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:30:06.255743584 +0000 UTC m=+1133.320247098" watchObservedRunningTime="2025-11-29 04:30:06.256319238 +0000 UTC m=+1133.320822752"
Nov 29 04:30:06 crc kubenswrapper[4631]: I1129 04:30:06.280039 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-w9m8l" podStartSLOduration=2.943303509 podStartE2EDuration="21.280022695s" podCreationTimestamp="2025-11-29 04:29:45 +0000 UTC" firstStartedPulling="2025-11-29 04:29:46.837281519 +0000 UTC m=+1113.901785033" lastFinishedPulling="2025-11-29 04:30:05.174000645 +0000 UTC m=+1132.238504219" observedRunningTime="2025-11-29 04:30:06.275315511 +0000 UTC m=+1133.339819025" watchObservedRunningTime="2025-11-29 04:30:06.280022695 +0000 UTC m=+1133.344526209"
Nov 29 04:30:07 crc kubenswrapper[4631]: I1129 04:30:07.268533 4631 generic.go:334] "Generic (PLEG): container finished" podID="78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" containerID="07f60739dd49e34c3e697d6f139a2ed988c4c05a9fb4115b577dbb0ba02e551c" exitCode=0
Nov 29 04:30:07 crc kubenswrapper[4631]: I1129 04:30:07.268800 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fc5cp-config-v9fsh" event={"ID":"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c","Type":"ContainerDied","Data":"07f60739dd49e34c3e697d6f139a2ed988c4c05a9fb4115b577dbb0ba02e551c"}
Nov 29 04:30:07 crc kubenswrapper[4631]: I1129 04:30:07.271316 4631 generic.go:334] "Generic (PLEG): container finished" podID="b9335343-399a-4ce5-ae0a-927f764b6d04" containerID="66c0429d8cff4c7754aa9bdff6b721c69598820e8acd549f8e5cf1b408a901f5" exitCode=0
Nov 29 04:30:07 crc kubenswrapper[4631]: I1129 04:30:07.272049 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k" event={"ID":"b9335343-399a-4ce5-ae0a-927f764b6d04","Type":"ContainerDied","Data":"66c0429d8cff4c7754aa9bdff6b721c69598820e8acd549f8e5cf1b408a901f5"}
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.281789 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"939229a6a156a0cf60f7692d8d8e27fa81450725b372f9e39466fefea57ebd72"}
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.715626 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.723324 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.848493 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-additional-scripts\") pod \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") "
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.848586 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9335343-399a-4ce5-ae0a-927f764b6d04-secret-volume\") pod \"b9335343-399a-4ce5-ae0a-927f764b6d04\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") "
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.848627 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run-ovn\") pod \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") "
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.848648 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-log-ovn\") pod \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") "
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.848665 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9335343-399a-4ce5-ae0a-927f764b6d04-config-volume\") pod \"b9335343-399a-4ce5-ae0a-927f764b6d04\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") "
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.848717 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfwdh\" (UniqueName: \"kubernetes.io/projected/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-kube-api-access-vfwdh\") pod \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") "
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.848784 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-scripts\") pod \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") "
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.848824 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run\") pod \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\" (UID: \"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c\") "
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.848884 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g87z7\" (UniqueName: \"kubernetes.io/projected/b9335343-399a-4ce5-ae0a-927f764b6d04-kube-api-access-g87z7\") pod \"b9335343-399a-4ce5-ae0a-927f764b6d04\" (UID: \"b9335343-399a-4ce5-ae0a-927f764b6d04\") "
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.849290 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" (UID: "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.849634 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" (UID: "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.849651 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9335343-399a-4ce5-ae0a-927f764b6d04-config-volume" (OuterVolumeSpecName: "config-volume") pod "b9335343-399a-4ce5-ae0a-927f764b6d04" (UID: "b9335343-399a-4ce5-ae0a-927f764b6d04"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.849680 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" (UID: "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.850464 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run" (OuterVolumeSpecName: "var-run") pod "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" (UID: "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.851932 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-scripts" (OuterVolumeSpecName: "scripts") pod "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" (UID: "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.860585 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9335343-399a-4ce5-ae0a-927f764b6d04-kube-api-access-g87z7" (OuterVolumeSpecName: "kube-api-access-g87z7") pod "b9335343-399a-4ce5-ae0a-927f764b6d04" (UID: "b9335343-399a-4ce5-ae0a-927f764b6d04"). InnerVolumeSpecName "kube-api-access-g87z7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.860671 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9335343-399a-4ce5-ae0a-927f764b6d04-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b9335343-399a-4ce5-ae0a-927f764b6d04" (UID: "b9335343-399a-4ce5-ae0a-927f764b6d04"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.871579 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-kube-api-access-vfwdh" (OuterVolumeSpecName: "kube-api-access-vfwdh") pod "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" (UID: "78a0d803-a7fb-487d-9d59-bb0aa13a5d2c"). InnerVolumeSpecName "kube-api-access-vfwdh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.951175 4631 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.951425 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g87z7\" (UniqueName: \"kubernetes.io/projected/b9335343-399a-4ce5-ae0a-927f764b6d04-kube-api-access-g87z7\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.951511 4631 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-additional-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.951614 4631 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9335343-399a-4ce5-ae0a-927f764b6d04-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.951690 4631 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.951763 4631 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.951870 4631 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9335343-399a-4ce5-ae0a-927f764b6d04-config-volume\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.951943 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfwdh\" (UniqueName: \"kubernetes.io/projected/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-kube-api-access-vfwdh\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:08 crc kubenswrapper[4631]: I1129 04:30:08.952022 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.294126 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fc5cp-config-v9fsh" event={"ID":"78a0d803-a7fb-487d-9d59-bb0aa13a5d2c","Type":"ContainerDied","Data":"b8b43c6a874da9d1bf9d3d6a212850ed23a8febb2f0098f8a346150dc53a8038"}
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.294187 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8b43c6a874da9d1bf9d3d6a212850ed23a8febb2f0098f8a346150dc53a8038"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.294287 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fc5cp-config-v9fsh"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.302866 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k" event={"ID":"b9335343-399a-4ce5-ae0a-927f764b6d04","Type":"ContainerDied","Data":"039d14e98ff22061af5d882354706e102c53245e7674e3526cce35f2db91666f"}
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.302994 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="039d14e98ff22061af5d882354706e102c53245e7674e3526cce35f2db91666f"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.303114 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.309143 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"1839b82e1206f07acdd141c284f9fa2a11395c38488014b3e86bae977d658885"}
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.309185 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"b62fbde563634e0308a1e4627101a45c049cdbf0c1590366ab6d2b96e62a1663"}
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.309198 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"cbb89e12f250ada5bfd99e33b4dd246253be7bba9b2aa27b6d54c2b68b1a62ef"}
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.858065 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-fc5cp-config-v9fsh"]
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.869380 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-fc5cp-config-v9fsh"]
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.949169 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-fc5cp-config-dnp4v"]
Nov 29 04:30:09 crc kubenswrapper[4631]: E1129 04:30:09.949512 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" containerName="ovn-config"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.949528 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" containerName="ovn-config"
Nov 29 04:30:09 crc kubenswrapper[4631]: E1129 04:30:09.949549 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9335343-399a-4ce5-ae0a-927f764b6d04" containerName="collect-profiles"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.949563 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9335343-399a-4ce5-ae0a-927f764b6d04" containerName="collect-profiles"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.949726 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" containerName="ovn-config"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.949743 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9335343-399a-4ce5-ae0a-927f764b6d04" containerName="collect-profiles"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.950246 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.952109 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 29 04:30:09 crc kubenswrapper[4631]: I1129 04:30:09.961615 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fc5cp-config-dnp4v"]
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.072667 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwstf\" (UniqueName: \"kubernetes.io/projected/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-kube-api-access-wwstf\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.072703 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-additional-scripts\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.072836 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.072861 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run-ovn\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.072898 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-scripts\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.072928 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-log-ovn\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.174103 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-log-ovn\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.174198 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwstf\" (UniqueName: \"kubernetes.io/projected/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-kube-api-access-wwstf\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.174218 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-additional-scripts\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.174279 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.174294 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run-ovn\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.174311 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-scripts\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.175453 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-log-ovn\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.175696 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-additional-scripts\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.175767 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.175799 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run-ovn\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.176203 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-scripts\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.211990 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwstf\" (UniqueName: \"kubernetes.io/projected/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-kube-api-access-wwstf\") pod \"ovn-controller-fc5cp-config-dnp4v\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: E1129 04:30:10.239006 4631 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod425113ab_54e0_4372_bdd1_587e2dc743d2.slice/crio-ba3b9807761eda10e3dfe76eed243b5c29abedbdc9d4902d9c1b0da9b6415c78.scope\": RecentStats: unable to find data in memory cache]"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.269088 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fc5cp-config-dnp4v"
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.340468 4631 generic.go:334] "Generic (PLEG): container finished" podID="425113ab-54e0-4372-bdd1-587e2dc743d2" containerID="ba3b9807761eda10e3dfe76eed243b5c29abedbdc9d4902d9c1b0da9b6415c78" exitCode=0
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.340519 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-w9m8l" event={"ID":"425113ab-54e0-4372-bdd1-587e2dc743d2","Type":"ContainerDied","Data":"ba3b9807761eda10e3dfe76eed243b5c29abedbdc9d4902d9c1b0da9b6415c78"}
Nov 29 04:30:10 crc kubenswrapper[4631]: I1129 04:30:10.985995 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-fc5cp"
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.092583 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-fc5cp-config-dnp4v"]
Nov 29 04:30:11 crc kubenswrapper[4631]: W1129 04:30:11.108259 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4891bd0_8ad8_4e42_a0cc_b3b45365784c.slice/crio-45c8af8ec2fd3a0df1745393d7d0e734039b36afcf991bbee37bf6d3a752ba24 WatchSource:0}: Error finding container 45c8af8ec2fd3a0df1745393d7d0e734039b36afcf991bbee37bf6d3a752ba24: Status 404 returned error can't find the container with id 45c8af8ec2fd3a0df1745393d7d0e734039b36afcf991bbee37bf6d3a752ba24
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.258811 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78a0d803-a7fb-487d-9d59-bb0aa13a5d2c" path="/var/lib/kubelet/pods/78a0d803-a7fb-487d-9d59-bb0aa13a5d2c/volumes"
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.351013 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"e963155fc283c8965931d35923fe7c400f4ec8617c2b8e89bd9acb548544f3b9"}
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.351056 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"1ede87fe4de06cdb1465a1e3c04414e09f11df7bff3ddeac33038c8b743622b6"}
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.351065 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"b3fec1f5adea1de3d62f2451c55e7b0b201bd1549aa534d7483a203c195d63d8"}
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.352646 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fc5cp-config-dnp4v" event={"ID":"c4891bd0-8ad8-4e42-a0cc-b3b45365784c","Type":"ContainerStarted","Data":"45c8af8ec2fd3a0df1745393d7d0e734039b36afcf991bbee37bf6d3a752ba24"}
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.664837 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-w9m8l"
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.805494 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdzwc\" (UniqueName: \"kubernetes.io/projected/425113ab-54e0-4372-bdd1-587e2dc743d2-kube-api-access-tdzwc\") pod \"425113ab-54e0-4372-bdd1-587e2dc743d2\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") "
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.805645 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-config-data\") pod \"425113ab-54e0-4372-bdd1-587e2dc743d2\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") "
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.806348 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-combined-ca-bundle\") pod \"425113ab-54e0-4372-bdd1-587e2dc743d2\" (UID: \"425113ab-54e0-4372-bdd1-587e2dc743d2\") "
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.813978 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/425113ab-54e0-4372-bdd1-587e2dc743d2-kube-api-access-tdzwc" (OuterVolumeSpecName: "kube-api-access-tdzwc") pod "425113ab-54e0-4372-bdd1-587e2dc743d2" (UID: "425113ab-54e0-4372-bdd1-587e2dc743d2"). InnerVolumeSpecName "kube-api-access-tdzwc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.830493 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "425113ab-54e0-4372-bdd1-587e2dc743d2" (UID: "425113ab-54e0-4372-bdd1-587e2dc743d2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.854731 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-config-data" (OuterVolumeSpecName: "config-data") pod "425113ab-54e0-4372-bdd1-587e2dc743d2" (UID: "425113ab-54e0-4372-bdd1-587e2dc743d2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.908358 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdzwc\" (UniqueName: \"kubernetes.io/projected/425113ab-54e0-4372-bdd1-587e2dc743d2-kube-api-access-tdzwc\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.908390 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:11 crc kubenswrapper[4631]: I1129 04:30:11.908400 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/425113ab-54e0-4372-bdd1-587e2dc743d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.364327 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-w9m8l" event={"ID":"425113ab-54e0-4372-bdd1-587e2dc743d2","Type":"ContainerDied","Data":"403bc32a3bf6f47e205990052f9794b31bb9198e2fc2c489c7b731828a349c4a"}
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.364411 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="403bc32a3bf6f47e205990052f9794b31bb9198e2fc2c489c7b731828a349c4a"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.365663 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-w9m8l"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.368897 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"86104c1cac257763075b31a075daffece16d46a6433637c259381aafe7f5b464"}
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.370695 4631 generic.go:334] "Generic (PLEG): container finished" podID="c4891bd0-8ad8-4e42-a0cc-b3b45365784c" containerID="cec8aba9383136d5e254e8796ae2d99a5d54042238ec8803eb725bec4e35a161" exitCode=0
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.370721 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fc5cp-config-dnp4v" event={"ID":"c4891bd0-8ad8-4e42-a0cc-b3b45365784c","Type":"ContainerDied","Data":"cec8aba9383136d5e254e8796ae2d99a5d54042238ec8803eb725bec4e35a161"}
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.662386 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-kc6tv"]
Nov 29 04:30:12 crc kubenswrapper[4631]: E1129 04:30:12.662716 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="425113ab-54e0-4372-bdd1-587e2dc743d2" containerName="keystone-db-sync"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.662731 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="425113ab-54e0-4372-bdd1-587e2dc743d2" containerName="keystone-db-sync"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.662878 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="425113ab-54e0-4372-bdd1-587e2dc743d2" containerName="keystone-db-sync"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.663693 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.687491 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-kc6tv"]
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.711939 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-jshqq"]
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.712981 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.719812 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.719992 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.720092 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-zjswg"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.720217 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.720858 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.748478 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jshqq"]
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.831092 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-scripts\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.831347 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-config\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.831426 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-config-data\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.831501 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9p28\" (UniqueName: \"kubernetes.io/projected/4fe6877d-5adf-4b60-9a97-eb309b37fde9-kube-api-access-m9p28\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.831594 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wmdv\" (UniqueName: \"kubernetes.io/projected/2d449f49-3eee-47b0-9367-326e093df05a-kube-api-access-5wmdv\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.831807 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-credential-keys\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.831865 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-fernet-keys\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.831975 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-nb\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.831991 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-sb\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.832010 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-combined-ca-bundle\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.832030 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-dns-svc\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.869463 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-849868977-5t6pd"]
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.874139 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-849868977-5t6pd"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.881775 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.881961 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.882045 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.882207 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-d9gnb"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.902170 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-849868977-5t6pd"]
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935126 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-credential-keys\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935176 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-fernet-keys\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935220 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-combined-ca-bundle\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935237 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-nb\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935251 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-sb\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935269 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-dns-svc\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935314 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-scripts\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935389 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-config\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935410 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-config-data\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935435 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9p28\" (UniqueName: \"kubernetes.io/projected/4fe6877d-5adf-4b60-9a97-eb309b37fde9-kube-api-access-m9p28\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.935456 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wmdv\" (UniqueName: \"kubernetes.io/projected/2d449f49-3eee-47b0-9367-326e093df05a-kube-api-access-5wmdv\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.959005 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-dns-svc\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.960653 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-config\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.964104 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-credential-keys\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.967077 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-nb\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.970481 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-7882p"]
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.970858 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-fernet-keys\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.971144 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-sb\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.971637 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7882p"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.971711 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-scripts\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.972776 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-7gknv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.972936 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.973787 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.979067 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-combined-ca-bundle\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.983091 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-config-data\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.990408 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.991975 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wmdv\" (UniqueName: \"kubernetes.io/projected/2d449f49-3eee-47b0-9367-326e093df05a-kube-api-access-5wmdv\") pod \"dnsmasq-dns-f877ddd87-kc6tv\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " pod="openstack/dnsmasq-dns-f877ddd87-kc6tv"
Nov 29 04:30:12 crc kubenswrapper[4631]: I1129 04:30:12.992830 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.019750 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.023473 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.023911 4631 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-kc6tv" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.033513 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.036899 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-7882p"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.042250 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-scripts\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.042294 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d194b14b-2b7c-4903-9361-e09887f4057f-horizon-secret-key\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.042314 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z5s6\" (UniqueName: \"kubernetes.io/projected/d194b14b-2b7c-4903-9361-e09887f4057f-kube-api-access-5z5s6\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.042400 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d194b14b-2b7c-4903-9361-e09887f4057f-logs\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.042434 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-config-data\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.055857 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9p28\" (UniqueName: \"kubernetes.io/projected/4fe6877d-5adf-4b60-9a97-eb309b37fde9-kube-api-access-m9p28\") pod \"keystone-bootstrap-jshqq\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " pod="openstack/keystone-bootstrap-jshqq" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.056193 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jshqq" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144278 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d194b14b-2b7c-4903-9361-e09887f4057f-horizon-secret-key\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144318 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-run-httpd\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144350 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z5s6\" (UniqueName: \"kubernetes.io/projected/d194b14b-2b7c-4903-9361-e09887f4057f-kube-api-access-5z5s6\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144402 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d194b14b-2b7c-4903-9361-e09887f4057f-logs\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144419 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-combined-ca-bundle\") pod \"neutron-db-sync-7882p\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144450 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-config-data\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144474 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-log-httpd\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144501 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpzxp\" (UniqueName: \"kubernetes.io/projected/30557b8c-2204-4118-a123-8fb42dc36b19-kube-api-access-hpzxp\") pod \"neutron-db-sync-7882p\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144515 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-config-data\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144532 4631 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144551 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-config\") pod \"neutron-db-sync-7882p\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144598 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144628 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-scripts\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144644 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nw56\" (UniqueName: \"kubernetes.io/projected/782a0b9b-d16f-495e-a648-e8a03af1e2d2-kube-api-access-9nw56\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.144661 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-scripts\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.145262 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d194b14b-2b7c-4903-9361-e09887f4057f-logs\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.157816 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.157922 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.158034 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.158856 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-scripts\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.159613 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-config-data\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.210445 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z5s6\" (UniqueName: \"kubernetes.io/projected/d194b14b-2b7c-4903-9361-e09887f4057f-kube-api-access-5z5s6\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.270635 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d194b14b-2b7c-4903-9361-e09887f4057f-horizon-secret-key\") pod \"horizon-849868977-5t6pd\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.271863 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-combined-ca-bundle\") pod \"neutron-db-sync-7882p\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.271937 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-log-httpd\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.271973 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpzxp\" (UniqueName: \"kubernetes.io/projected/30557b8c-2204-4118-a123-8fb42dc36b19-kube-api-access-hpzxp\") pod \"neutron-db-sync-7882p\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.271987 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-config-data\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.272008 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.272029 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-config\") pod \"neutron-db-sync-7882p\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.272097 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 
04:30:13.272137 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nw56\" (UniqueName: \"kubernetes.io/projected/782a0b9b-d16f-495e-a648-e8a03af1e2d2-kube-api-access-9nw56\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.272153 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-scripts\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.272179 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-run-httpd\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.335245 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-combined-ca-bundle\") pod \"neutron-db-sync-7882p\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.340859 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-run-httpd\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.373950 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-log-httpd\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.381310 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.400814 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.423705 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.446190 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-scripts\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.454249 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-config-data\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.504367 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " 
pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.508220 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-config\") pod \"neutron-db-sync-7882p\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.509634 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-cztsz"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.511952 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.516463 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.536656 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-cztsz"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.536698 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-df4bd449c-d2k7v"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.538143 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.544115 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpzxp\" (UniqueName: \"kubernetes.io/projected/30557b8c-2204-4118-a123-8fb42dc36b19-kube-api-access-hpzxp\") pod \"neutron-db-sync-7882p\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.544434 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-df4bd449c-d2k7v"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.559833 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-d9gnb" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.560111 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.560219 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-7dnj9" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.560366 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.576812 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.584704 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nw56\" (UniqueName: \"kubernetes.io/projected/782a0b9b-d16f-495e-a648-e8a03af1e2d2-kube-api-access-9nw56\") pod \"ceilometer-0\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.632679 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-kc6tv"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.649708 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-scripts\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.649769 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-combined-ca-bundle\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.649801 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2fgr\" (UniqueName: \"kubernetes.io/projected/d646890b-5054-4ad5-9dc0-940a5e397fd0-kube-api-access-m2fgr\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.649832 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz7zg\" (UniqueName: \"kubernetes.io/projected/25c11a81-f1b8-46c8-aed5-7875a37dbd06-kube-api-access-mz7zg\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.649859 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25c11a81-f1b8-46c8-aed5-7875a37dbd06-logs\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.649899 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-scripts\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.649919 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d646890b-5054-4ad5-9dc0-940a5e397fd0-etc-machine-id\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.649950 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-config-data\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.649981 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-config-data\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.650011 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-db-sync-config-data\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.650039 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/25c11a81-f1b8-46c8-aed5-7875a37dbd06-horizon-secret-key\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.659686 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-4zg2x"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.735457 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-7gknv" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.735831 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.739697 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751198 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d646890b-5054-4ad5-9dc0-940a5e397fd0-etc-machine-id\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751237 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-scripts\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751272 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-config-data\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751308 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-config-data\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751346 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-db-sync-config-data\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751371 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/25c11a81-f1b8-46c8-aed5-7875a37dbd06-horizon-secret-key\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751425 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-scripts\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751445 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-combined-ca-bundle\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751465 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2fgr\" (UniqueName: \"kubernetes.io/projected/d646890b-5054-4ad5-9dc0-940a5e397fd0-kube-api-access-m2fgr\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751493 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-mz7zg\" (UniqueName: \"kubernetes.io/projected/25c11a81-f1b8-46c8-aed5-7875a37dbd06-kube-api-access-mz7zg\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751513 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25c11a81-f1b8-46c8-aed5-7875a37dbd06-logs\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751875 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25c11a81-f1b8-46c8-aed5-7875a37dbd06-logs\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.751921 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d646890b-5054-4ad5-9dc0-940a5e397fd0-etc-machine-id\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.752396 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-scripts\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.772182 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.773425 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-db-sync-config-data\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.775262 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-config-data\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.776288 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-config-data\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.791414 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-k9j2z" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.791628 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.793096 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-jmcnk"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.793242 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-combined-ca-bundle\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.799106 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.800321 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/25c11a81-f1b8-46c8-aed5-7875a37dbd06-horizon-secret-key\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.802371 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-scripts\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.824655 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-4zg2x"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.829982 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.831949 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-8w6dr" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.832079 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.840634 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2fgr\" (UniqueName: \"kubernetes.io/projected/d646890b-5054-4ad5-9dc0-940a5e397fd0-kube-api-access-m2fgr\") pod \"cinder-db-sync-cztsz\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.841949 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mz7zg\" (UniqueName: \"kubernetes.io/projected/25c11a81-f1b8-46c8-aed5-7875a37dbd06-kube-api-access-mz7zg\") pod \"horizon-df4bd449c-d2k7v\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.851157 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-jmcnk"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.855046 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-config-data\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.855099 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-db-sync-config-data\") pod \"barbican-db-sync-4zg2x\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.855670 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2h4b\" (UniqueName: \"kubernetes.io/projected/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-kube-api-access-r2h4b\") pod \"barbican-db-sync-4zg2x\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " 
pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.855711 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-combined-ca-bundle\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.855732 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a612579-d131-4dbd-85bc-ba455a26db3b-logs\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.855773 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-scripts\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.855799 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-combined-ca-bundle\") pod \"barbican-db-sync-4zg2x\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.855897 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcbk2\" (UniqueName: \"kubernetes.io/projected/8a612579-d131-4dbd-85bc-ba455a26db3b-kube-api-access-vcbk2\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.957631 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2h4b\" (UniqueName: \"kubernetes.io/projected/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-kube-api-access-r2h4b\") pod \"barbican-db-sync-4zg2x\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.957679 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-combined-ca-bundle\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.957701 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a612579-d131-4dbd-85bc-ba455a26db3b-logs\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.957742 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-scripts\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: 
I1129 04:30:13.957766 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-combined-ca-bundle\") pod \"barbican-db-sync-4zg2x\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.957795 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcbk2\" (UniqueName: \"kubernetes.io/projected/8a612579-d131-4dbd-85bc-ba455a26db3b-kube-api-access-vcbk2\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.957818 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-config-data\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.957844 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-db-sync-config-data\") pod \"barbican-db-sync-4zg2x\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.962418 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-2p6j8"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.963792 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.965354 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a612579-d131-4dbd-85bc-ba455a26db3b-logs\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.967510 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-combined-ca-bundle\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.967590 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-combined-ca-bundle\") pod \"barbican-db-sync-4zg2x\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.970127 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-config-data\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.981824 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-db-sync-config-data\") 
pod \"barbican-db-sync-4zg2x\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.983169 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-2p6j8"] Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.983587 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-scripts\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.995349 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cztsz" Nov 29 04:30:13 crc kubenswrapper[4631]: I1129 04:30:13.999797 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2h4b\" (UniqueName: \"kubernetes.io/projected/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-kube-api-access-r2h4b\") pod \"barbican-db-sync-4zg2x\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.002143 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcbk2\" (UniqueName: \"kubernetes.io/projected/8a612579-d131-4dbd-85bc-ba455a26db3b-kube-api-access-vcbk2\") pod \"placement-db-sync-jmcnk\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.003554 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.057006 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jshqq"] Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.113648 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.134480 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-jmcnk" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.161371 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-dns-svc\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.161414 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-sb\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.161449 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbcd9\" (UniqueName: \"kubernetes.io/projected/e6972f45-d644-4118-8c37-2ca075a65b12-kube-api-access-wbcd9\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.161515 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-config\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.161540 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-nb\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.264242 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-dns-svc\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.264603 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-sb\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.264637 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbcd9\" (UniqueName: \"kubernetes.io/projected/e6972f45-d644-4118-8c37-2ca075a65b12-kube-api-access-wbcd9\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.264712 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-config\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: 
\"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.264737 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-nb\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.265783 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-nb\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.265828 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-dns-svc\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.265879 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-sb\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.266355 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-config\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.300083 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-kc6tv"] Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.308186 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbcd9\" (UniqueName: \"kubernetes.io/projected/e6972f45-d644-4118-8c37-2ca075a65b12-kube-api-access-wbcd9\") pod \"dnsmasq-dns-68dcc9cf6f-2p6j8\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.327302 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.424144 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.520102 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-fc5cp-config-dnp4v" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.543376 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f877ddd87-kc6tv" event={"ID":"2d449f49-3eee-47b0-9367-326e093df05a","Type":"ContainerStarted","Data":"e5fc5b72975b45d622658107140baa04592e2bc2beefefb8cc244422b9d025e4"} Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.544578 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-fc5cp-config-dnp4v" event={"ID":"c4891bd0-8ad8-4e42-a0cc-b3b45365784c","Type":"ContainerDied","Data":"45c8af8ec2fd3a0df1745393d7d0e734039b36afcf991bbee37bf6d3a752ba24"} Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.544596 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45c8af8ec2fd3a0df1745393d7d0e734039b36afcf991bbee37bf6d3a752ba24" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.544639 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-fc5cp-config-dnp4v" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.553633 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jshqq" event={"ID":"4fe6877d-5adf-4b60-9a97-eb309b37fde9","Type":"ContainerStarted","Data":"afc91ebb46eedd63b9b3ce27846f6cf1c188b5d7ce0805766095535615796e30"} Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.678175 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-additional-scripts\") pod \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.678572 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-scripts\") pod \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.678589 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-log-ovn\") pod \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.678608 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwstf\" (UniqueName: \"kubernetes.io/projected/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-kube-api-access-wwstf\") pod \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.678637 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run\") pod \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.678691 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run-ovn\") pod \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\" (UID: \"c4891bd0-8ad8-4e42-a0cc-b3b45365784c\") " Nov 29 04:30:14 
crc kubenswrapper[4631]: I1129 04:30:14.678914 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "c4891bd0-8ad8-4e42-a0cc-b3b45365784c" (UID: "c4891bd0-8ad8-4e42-a0cc-b3b45365784c"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.679589 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "c4891bd0-8ad8-4e42-a0cc-b3b45365784c" (UID: "c4891bd0-8ad8-4e42-a0cc-b3b45365784c"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.680254 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-scripts" (OuterVolumeSpecName: "scripts") pod "c4891bd0-8ad8-4e42-a0cc-b3b45365784c" (UID: "c4891bd0-8ad8-4e42-a0cc-b3b45365784c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.680277 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "c4891bd0-8ad8-4e42-a0cc-b3b45365784c" (UID: "c4891bd0-8ad8-4e42-a0cc-b3b45365784c"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.680429 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run" (OuterVolumeSpecName: "var-run") pod "c4891bd0-8ad8-4e42-a0cc-b3b45365784c" (UID: "c4891bd0-8ad8-4e42-a0cc-b3b45365784c"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.698996 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-kube-api-access-wwstf" (OuterVolumeSpecName: "kube-api-access-wwstf") pod "c4891bd0-8ad8-4e42-a0cc-b3b45365784c" (UID: "c4891bd0-8ad8-4e42-a0cc-b3b45365784c"). InnerVolumeSpecName "kube-api-access-wwstf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.790259 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.790616 4631 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.790627 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwstf\" (UniqueName: \"kubernetes.io/projected/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-kube-api-access-wwstf\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.790639 4631 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.790652 4631 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:14 crc kubenswrapper[4631]: I1129 04:30:14.790662 4631 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c4891bd0-8ad8-4e42-a0cc-b3b45365784c-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.192280 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-jmcnk"] Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.199519 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-7882p"] Nov 29 04:30:15 crc kubenswrapper[4631]: W1129 04:30:15.248478 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30557b8c_2204_4118_a123_8fb42dc36b19.slice/crio-76d82171d2dbb683de177577c5ecc5c68f6c5f76d232d651ad02ab30c8b1acb8 WatchSource:0}: Error finding container 76d82171d2dbb683de177577c5ecc5c68f6c5f76d232d651ad02ab30c8b1acb8: Status 404 returned error can't find the container with id 76d82171d2dbb683de177577c5ecc5c68f6c5f76d232d651ad02ab30c8b1acb8 Nov 29 04:30:15 crc kubenswrapper[4631]: W1129 04:30:15.250258 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a612579_d131_4dbd_85bc_ba455a26db3b.slice/crio-0f356b678d94a731123a4e141da237e9a76b3020cce9d2f08bacf4d2fd0d20a0 WatchSource:0}: Error finding container 0f356b678d94a731123a4e141da237e9a76b3020cce9d2f08bacf4d2fd0d20a0: Status 404 returned error can't find the container with id 0f356b678d94a731123a4e141da237e9a76b3020cce9d2f08bacf4d2fd0d20a0 Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.305268 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-849868977-5t6pd"] Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.370056 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.408310 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-df4bd449c-d2k7v"] Nov 29 04:30:15 crc kubenswrapper[4631]: 
I1129 04:30:15.514832 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-849868977-5t6pd"] Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.575826 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-58dfd7ff49-n5djz"] Nov 29 04:30:15 crc kubenswrapper[4631]: E1129 04:30:15.576201 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4891bd0-8ad8-4e42-a0cc-b3b45365784c" containerName="ovn-config" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.576212 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4891bd0-8ad8-4e42-a0cc-b3b45365784c" containerName="ovn-config" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.576387 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4891bd0-8ad8-4e42-a0cc-b3b45365784c" containerName="ovn-config" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.581561 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.588145 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"782a0b9b-d16f-495e-a648-e8a03af1e2d2","Type":"ContainerStarted","Data":"e53ab7793075cb925762e89dfacc1068f15674e180599c6b79ff47823ed6dfe1"} Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.604509 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jmcnk" event={"ID":"8a612579-d131-4dbd-85bc-ba455a26db3b","Type":"ContainerStarted","Data":"0f356b678d94a731123a4e141da237e9a76b3020cce9d2f08bacf4d2fd0d20a0"} Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.610099 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f877ddd87-kc6tv" podUID="2d449f49-3eee-47b0-9367-326e093df05a" containerName="init" containerID="cri-o://b849cd30dc68a798c6710663fe42399477ab4cf5f7a87d71ee41696dcff93b9c" gracePeriod=10 Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.612530 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7882p" event={"ID":"30557b8c-2204-4118-a123-8fb42dc36b19","Type":"ContainerStarted","Data":"76d82171d2dbb683de177577c5ecc5c68f6c5f76d232d651ad02ab30c8b1acb8"} Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.633424 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-849868977-5t6pd" event={"ID":"d194b14b-2b7c-4903-9361-e09887f4057f","Type":"ContainerStarted","Data":"3ebe41b2120663e3a68a13a434ee18c7acebe4ec6e5f9911f71e47dfc140acbc"} Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.638050 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58dfd7ff49-n5djz"] Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.643445 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-df4bd449c-d2k7v" event={"ID":"25c11a81-f1b8-46c8-aed5-7875a37dbd06","Type":"ContainerStarted","Data":"73c5f557b838bdbff7516fee9215afd7a764355c0ce03de6578fce520256397e"} Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.702423 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.708252 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq7rz\" (UniqueName: \"kubernetes.io/projected/9f926252-a86c-4c5b-a394-88c721681c4c-kube-api-access-bq7rz\") pod 
\"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.708294 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-scripts\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.708375 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9f926252-a86c-4c5b-a394-88c721681c4c-horizon-secret-key\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.708422 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f926252-a86c-4c5b-a394-88c721681c4c-logs\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.708470 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-config-data\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.726698 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-fc5cp-config-dnp4v"] Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.738650 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-fc5cp-config-dnp4v"] Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.810458 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9f926252-a86c-4c5b-a394-88c721681c4c-horizon-secret-key\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.810503 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f926252-a86c-4c5b-a394-88c721681c4c-logs\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.810547 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-config-data\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.810628 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bq7rz\" (UniqueName: \"kubernetes.io/projected/9f926252-a86c-4c5b-a394-88c721681c4c-kube-api-access-bq7rz\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 
04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.810649 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-scripts\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.812109 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f926252-a86c-4c5b-a394-88c721681c4c-logs\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.812243 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-scripts\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.812466 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-config-data\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.817777 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9f926252-a86c-4c5b-a394-88c721681c4c-horizon-secret-key\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.831244 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq7rz\" (UniqueName: \"kubernetes.io/projected/9f926252-a86c-4c5b-a394-88c721681c4c-kube-api-access-bq7rz\") pod \"horizon-58dfd7ff49-n5djz\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.913709 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:15 crc kubenswrapper[4631]: I1129 04:30:15.953840 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-4zg2x"] Nov 29 04:30:16 crc kubenswrapper[4631]: I1129 04:30:16.075220 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-cztsz"] Nov 29 04:30:16 crc kubenswrapper[4631]: I1129 04:30:16.094110 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-2p6j8"] Nov 29 04:30:16 crc kubenswrapper[4631]: W1129 04:30:16.640994 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77e723d0_49bf_4008_bbce_7c1fe2ad3a5d.slice/crio-94a26d1ac58b90921c5aa447e84d3bf1c1a3c7851eae14f2b58a0fd49c3c0a74 WatchSource:0}: Error finding container 94a26d1ac58b90921c5aa447e84d3bf1c1a3c7851eae14f2b58a0fd49c3c0a74: Status 404 returned error can't find the container with id 94a26d1ac58b90921c5aa447e84d3bf1c1a3c7851eae14f2b58a0fd49c3c0a74 Nov 29 04:30:16 crc kubenswrapper[4631]: W1129 04:30:16.668057 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd646890b_5054_4ad5_9dc0_940a5e397fd0.slice/crio-7cf43755c2614aab77515d1114e13f2829d88ef66924e856dc8015272e6c6ef8 WatchSource:0}: Error finding container 7cf43755c2614aab77515d1114e13f2829d88ef66924e856dc8015272e6c6ef8: Status 404 returned error can't find the container with id 7cf43755c2614aab77515d1114e13f2829d88ef66924e856dc8015272e6c6ef8 Nov 29 04:30:16 crc kubenswrapper[4631]: I1129 04:30:16.747794 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jshqq" event={"ID":"4fe6877d-5adf-4b60-9a97-eb309b37fde9","Type":"ContainerStarted","Data":"809fd41a7c49c20fb18551e80cf3895e47ae14d32e10f53e1382dc48742f2ebe"} Nov 29 04:30:16 crc kubenswrapper[4631]: I1129 04:30:16.773811 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-jshqq" podStartSLOduration=4.773794877 podStartE2EDuration="4.773794877s" podCreationTimestamp="2025-11-29 04:30:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:30:16.767070544 +0000 UTC m=+1143.831574058" watchObservedRunningTime="2025-11-29 04:30:16.773794877 +0000 UTC m=+1143.838298391" Nov 29 04:30:16 crc kubenswrapper[4631]: I1129 04:30:16.781942 4631 generic.go:334] "Generic (PLEG): container finished" podID="2d449f49-3eee-47b0-9367-326e093df05a" containerID="b849cd30dc68a798c6710663fe42399477ab4cf5f7a87d71ee41696dcff93b9c" exitCode=0 Nov 29 04:30:16 crc kubenswrapper[4631]: I1129 04:30:16.782055 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f877ddd87-kc6tv" event={"ID":"2d449f49-3eee-47b0-9367-326e093df05a","Type":"ContainerDied","Data":"b849cd30dc68a798c6710663fe42399477ab4cf5f7a87d71ee41696dcff93b9c"} Nov 29 04:30:16 crc kubenswrapper[4631]: I1129 04:30:16.788650 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-4zg2x" event={"ID":"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d","Type":"ContainerStarted","Data":"94a26d1ac58b90921c5aa447e84d3bf1c1a3c7851eae14f2b58a0fd49c3c0a74"} Nov 29 04:30:16 crc kubenswrapper[4631]: I1129 04:30:16.813293 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7882p" 
event={"ID":"30557b8c-2204-4118-a123-8fb42dc36b19","Type":"ContainerStarted","Data":"ae5b2bdd4d4687b9e130c1e5e7c365ad1d8e48fd67ea533d600227b1a2db6a1b"} Nov 29 04:30:16 crc kubenswrapper[4631]: I1129 04:30:16.849435 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-7882p" podStartSLOduration=4.849414268 podStartE2EDuration="4.849414268s" podCreationTimestamp="2025-11-29 04:30:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:30:16.831530203 +0000 UTC m=+1143.896033707" watchObservedRunningTime="2025-11-29 04:30:16.849414268 +0000 UTC m=+1143.913917782" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.083618 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-kc6tv" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.248604 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wmdv\" (UniqueName: \"kubernetes.io/projected/2d449f49-3eee-47b0-9367-326e093df05a-kube-api-access-5wmdv\") pod \"2d449f49-3eee-47b0-9367-326e093df05a\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.249010 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-sb\") pod \"2d449f49-3eee-47b0-9367-326e093df05a\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.251540 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-config\") pod \"2d449f49-3eee-47b0-9367-326e093df05a\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.251774 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-nb\") pod \"2d449f49-3eee-47b0-9367-326e093df05a\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.251804 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-dns-svc\") pod \"2d449f49-3eee-47b0-9367-326e093df05a\" (UID: \"2d449f49-3eee-47b0-9367-326e093df05a\") " Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.253678 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4891bd0-8ad8-4e42-a0cc-b3b45365784c" path="/var/lib/kubelet/pods/c4891bd0-8ad8-4e42-a0cc-b3b45365784c/volumes" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.273069 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d449f49-3eee-47b0-9367-326e093df05a-kube-api-access-5wmdv" (OuterVolumeSpecName: "kube-api-access-5wmdv") pod "2d449f49-3eee-47b0-9367-326e093df05a" (UID: "2d449f49-3eee-47b0-9367-326e093df05a"). InnerVolumeSpecName "kube-api-access-5wmdv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.301872 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2d449f49-3eee-47b0-9367-326e093df05a" (UID: "2d449f49-3eee-47b0-9367-326e093df05a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.316467 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2d449f49-3eee-47b0-9367-326e093df05a" (UID: "2d449f49-3eee-47b0-9367-326e093df05a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.336109 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-config" (OuterVolumeSpecName: "config") pod "2d449f49-3eee-47b0-9367-326e093df05a" (UID: "2d449f49-3eee-47b0-9367-326e093df05a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.336647 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2d449f49-3eee-47b0-9367-326e093df05a" (UID: "2d449f49-3eee-47b0-9367-326e093df05a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.361895 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.362614 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.362783 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wmdv\" (UniqueName: \"kubernetes.io/projected/2d449f49-3eee-47b0-9367-326e093df05a-kube-api-access-5wmdv\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.362882 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.362994 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d449f49-3eee-47b0-9367-326e093df05a-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.466853 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58dfd7ff49-n5djz"] Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.849532 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cztsz" 
event={"ID":"d646890b-5054-4ad5-9dc0-940a5e397fd0","Type":"ContainerStarted","Data":"7cf43755c2614aab77515d1114e13f2829d88ef66924e856dc8015272e6c6ef8"} Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.860833 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"e5f3e99e327873cc746be74f26d9d435c7ff3ee4469e3e7214cf7a799f1adbb0"} Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.860873 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"7c51e32b0273eb0eaf645f50253f03a40ce8072d6e92538b292cc2a60bc8ce83"} Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.864554 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f877ddd87-kc6tv" event={"ID":"2d449f49-3eee-47b0-9367-326e093df05a","Type":"ContainerDied","Data":"e5fc5b72975b45d622658107140baa04592e2bc2beefefb8cc244422b9d025e4"} Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.864591 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-kc6tv" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.864611 4631 scope.go:117] "RemoveContainer" containerID="b849cd30dc68a798c6710663fe42399477ab4cf5f7a87d71ee41696dcff93b9c" Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.868036 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58dfd7ff49-n5djz" event={"ID":"9f926252-a86c-4c5b-a394-88c721681c4c","Type":"ContainerStarted","Data":"21bad1c752c351770fbc96734b189c996d76f68f5cdd7d3449db5ed9581b86f6"} Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.872980 4631 generic.go:334] "Generic (PLEG): container finished" podID="e6972f45-d644-4118-8c37-2ca075a65b12" containerID="c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6" exitCode=0 Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.873947 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" event={"ID":"e6972f45-d644-4118-8c37-2ca075a65b12","Type":"ContainerDied","Data":"c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6"} Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.873964 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" event={"ID":"e6972f45-d644-4118-8c37-2ca075a65b12","Type":"ContainerStarted","Data":"f489eeb6dffc11f675d4dfc792e89c46be7d5236374dce3f8c4ddef3cc40d39a"} Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.940935 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-kc6tv"] Nov 29 04:30:17 crc kubenswrapper[4631]: I1129 04:30:17.949629 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-kc6tv"] Nov 29 04:30:18 crc kubenswrapper[4631]: I1129 04:30:18.911155 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-57lmq" event={"ID":"c6cdce96-7bd4-45c6-9597-6196ceee67ef","Type":"ContainerStarted","Data":"9d113d2b9e9971ad81ce0e829fe55922228723302c79bc06b73da6244ca7728a"} Nov 29 04:30:18 crc kubenswrapper[4631]: I1129 04:30:18.935820 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-57lmq" podStartSLOduration=3.518404899 podStartE2EDuration="37.935802979s" podCreationTimestamp="2025-11-29 04:29:41 +0000 
UTC" firstStartedPulling="2025-11-29 04:29:42.486054093 +0000 UTC m=+1109.550557607" lastFinishedPulling="2025-11-29 04:30:16.903452173 +0000 UTC m=+1143.967955687" observedRunningTime="2025-11-29 04:30:18.930977752 +0000 UTC m=+1145.995481276" watchObservedRunningTime="2025-11-29 04:30:18.935802979 +0000 UTC m=+1146.000306493" Nov 29 04:30:19 crc kubenswrapper[4631]: I1129 04:30:19.227945 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d449f49-3eee-47b0-9367-326e093df05a" path="/var/lib/kubelet/pods/2d449f49-3eee-47b0-9367-326e093df05a/volumes" Nov 29 04:30:19 crc kubenswrapper[4631]: I1129 04:30:19.926117 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"1d76fb203f0f13b30432475129feff4b52e52edc3bde108d23f4b58f713a6757"} Nov 29 04:30:19 crc kubenswrapper[4631]: I1129 04:30:19.929115 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" event={"ID":"e6972f45-d644-4118-8c37-2ca075a65b12","Type":"ContainerStarted","Data":"f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef"} Nov 29 04:30:19 crc kubenswrapper[4631]: I1129 04:30:19.930130 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:21 crc kubenswrapper[4631]: I1129 04:30:21.954237 4631 generic.go:334] "Generic (PLEG): container finished" podID="4fe6877d-5adf-4b60-9a97-eb309b37fde9" containerID="809fd41a7c49c20fb18551e80cf3895e47ae14d32e10f53e1382dc48742f2ebe" exitCode=0 Nov 29 04:30:21 crc kubenswrapper[4631]: I1129 04:30:21.954436 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jshqq" event={"ID":"4fe6877d-5adf-4b60-9a97-eb309b37fde9","Type":"ContainerDied","Data":"809fd41a7c49c20fb18551e80cf3895e47ae14d32e10f53e1382dc48742f2ebe"} Nov 29 04:30:21 crc kubenswrapper[4631]: I1129 04:30:21.989135 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" podStartSLOduration=8.989119726 podStartE2EDuration="8.989119726s" podCreationTimestamp="2025-11-29 04:30:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:30:19.947845802 +0000 UTC m=+1147.012349316" watchObservedRunningTime="2025-11-29 04:30:21.989119726 +0000 UTC m=+1149.053623240" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.784251 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-df4bd449c-d2k7v"] Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.810430 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-76fdc69464-qvs2b"] Nov 29 04:30:22 crc kubenswrapper[4631]: E1129 04:30:22.810865 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d449f49-3eee-47b0-9367-326e093df05a" containerName="init" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.810879 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d449f49-3eee-47b0-9367-326e093df05a" containerName="init" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.811080 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d449f49-3eee-47b0-9367-326e093df05a" containerName="init" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.811982 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.822817 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-76fdc69464-qvs2b"] Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.823124 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.900759 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-secret-key\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.900865 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-tls-certs\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.900979 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-combined-ca-bundle\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.901103 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-config-data\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.901184 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xprll\" (UniqueName: \"kubernetes.io/projected/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-kube-api-access-xprll\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.901210 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-logs\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.901425 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-scripts\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.909171 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-58dfd7ff49-n5djz"] Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.934976 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5964d597b6-rfcr2"] Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.936283 
4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:22 crc kubenswrapper[4631]: I1129 04:30:22.957669 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5964d597b6-rfcr2"] Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004291 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-secret-key\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004664 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-tls-certs\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004693 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-combined-ca-bundle\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004722 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2a6410f-6c69-4b87-a247-b285aef98b71-horizon-tls-certs\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004745 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e2a6410f-6c69-4b87-a247-b285aef98b71-horizon-secret-key\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004762 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2a6410f-6c69-4b87-a247-b285aef98b71-logs\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004785 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-config-data\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004802 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e2a6410f-6c69-4b87-a247-b285aef98b71-config-data\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004823 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/configmap/e2a6410f-6c69-4b87-a247-b285aef98b71-scripts\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004851 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xprll\" (UniqueName: \"kubernetes.io/projected/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-kube-api-access-xprll\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004868 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-logs\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004900 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a6410f-6c69-4b87-a247-b285aef98b71-combined-ca-bundle\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004921 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcvrq\" (UniqueName: \"kubernetes.io/projected/e2a6410f-6c69-4b87-a247-b285aef98b71-kube-api-access-qcvrq\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.004941 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-scripts\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.007502 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-scripts\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.007750 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-logs\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.011787 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-config-data\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.014044 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-combined-ca-bundle\") pod \"horizon-76fdc69464-qvs2b\" (UID: 
\"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.014311 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-secret-key\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.015266 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-tls-certs\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.023033 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xprll\" (UniqueName: \"kubernetes.io/projected/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-kube-api-access-xprll\") pod \"horizon-76fdc69464-qvs2b\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.106691 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a6410f-6c69-4b87-a247-b285aef98b71-combined-ca-bundle\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.106745 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcvrq\" (UniqueName: \"kubernetes.io/projected/e2a6410f-6c69-4b87-a247-b285aef98b71-kube-api-access-qcvrq\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.106867 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2a6410f-6c69-4b87-a247-b285aef98b71-horizon-tls-certs\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.106887 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e2a6410f-6c69-4b87-a247-b285aef98b71-horizon-secret-key\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.106905 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2a6410f-6c69-4b87-a247-b285aef98b71-logs\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.106928 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e2a6410f-6c69-4b87-a247-b285aef98b71-config-data\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: 
I1129 04:30:23.106947 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2a6410f-6c69-4b87-a247-b285aef98b71-scripts\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.107795 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2a6410f-6c69-4b87-a247-b285aef98b71-scripts\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.107945 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2a6410f-6c69-4b87-a247-b285aef98b71-logs\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.108973 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e2a6410f-6c69-4b87-a247-b285aef98b71-config-data\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.124830 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcvrq\" (UniqueName: \"kubernetes.io/projected/e2a6410f-6c69-4b87-a247-b285aef98b71-kube-api-access-qcvrq\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.125763 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e2a6410f-6c69-4b87-a247-b285aef98b71-horizon-secret-key\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.127948 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2a6410f-6c69-4b87-a247-b285aef98b71-horizon-tls-certs\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.129463 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a6410f-6c69-4b87-a247-b285aef98b71-combined-ca-bundle\") pod \"horizon-5964d597b6-rfcr2\" (UID: \"e2a6410f-6c69-4b87-a247-b285aef98b71\") " pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.140357 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.265417 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.726972 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jshqq" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.825155 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-fernet-keys\") pod \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.825205 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9p28\" (UniqueName: \"kubernetes.io/projected/4fe6877d-5adf-4b60-9a97-eb309b37fde9-kube-api-access-m9p28\") pod \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.825251 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-scripts\") pod \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.825310 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-config-data\") pod \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.825369 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-combined-ca-bundle\") pod \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.825409 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-credential-keys\") pod \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\" (UID: \"4fe6877d-5adf-4b60-9a97-eb309b37fde9\") " Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.832164 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4fe6877d-5adf-4b60-9a97-eb309b37fde9" (UID: "4fe6877d-5adf-4b60-9a97-eb309b37fde9"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.833115 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "4fe6877d-5adf-4b60-9a97-eb309b37fde9" (UID: "4fe6877d-5adf-4b60-9a97-eb309b37fde9"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.833456 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-scripts" (OuterVolumeSpecName: "scripts") pod "4fe6877d-5adf-4b60-9a97-eb309b37fde9" (UID: "4fe6877d-5adf-4b60-9a97-eb309b37fde9"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.834711 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fe6877d-5adf-4b60-9a97-eb309b37fde9-kube-api-access-m9p28" (OuterVolumeSpecName: "kube-api-access-m9p28") pod "4fe6877d-5adf-4b60-9a97-eb309b37fde9" (UID: "4fe6877d-5adf-4b60-9a97-eb309b37fde9"). InnerVolumeSpecName "kube-api-access-m9p28". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.866992 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4fe6877d-5adf-4b60-9a97-eb309b37fde9" (UID: "4fe6877d-5adf-4b60-9a97-eb309b37fde9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.870062 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-config-data" (OuterVolumeSpecName: "config-data") pod "4fe6877d-5adf-4b60-9a97-eb309b37fde9" (UID: "4fe6877d-5adf-4b60-9a97-eb309b37fde9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.929717 4631 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.929748 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9p28\" (UniqueName: \"kubernetes.io/projected/4fe6877d-5adf-4b60-9a97-eb309b37fde9-kube-api-access-m9p28\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.929761 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.929770 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.929780 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.929791 4631 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4fe6877d-5adf-4b60-9a97-eb309b37fde9-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.980828 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jshqq" event={"ID":"4fe6877d-5adf-4b60-9a97-eb309b37fde9","Type":"ContainerDied","Data":"afc91ebb46eedd63b9b3ce27846f6cf1c188b5d7ce0805766095535615796e30"} Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.980858 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jshqq" Nov 29 04:30:23 crc kubenswrapper[4631]: I1129 04:30:23.980878 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afc91ebb46eedd63b9b3ce27846f6cf1c188b5d7ce0805766095535615796e30" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.126830 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-jshqq"] Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.143697 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-jshqq"] Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.235804 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-6bzb6"] Nov 29 04:30:24 crc kubenswrapper[4631]: E1129 04:30:24.236319 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fe6877d-5adf-4b60-9a97-eb309b37fde9" containerName="keystone-bootstrap" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.236344 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fe6877d-5adf-4b60-9a97-eb309b37fde9" containerName="keystone-bootstrap" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.236526 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fe6877d-5adf-4b60-9a97-eb309b37fde9" containerName="keystone-bootstrap" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.237043 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.240032 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.240160 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.241107 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.241501 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-zjswg" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.242311 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.307413 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6bzb6"] Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.328530 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.343154 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whtqq\" (UniqueName: \"kubernetes.io/projected/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-kube-api-access-whtqq\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.343276 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-config-data\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: 
I1129 04:30:24.343300 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-scripts\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.343387 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-fernet-keys\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.343418 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-combined-ca-bundle\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.343441 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-credential-keys\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.410600 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-g5w92"] Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.410825 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-g5w92" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="dnsmasq-dns" containerID="cri-o://0dad95f4d76c1be8f557c91bbee84bc400d1680b83812ca4f2bc9b81509f19ea" gracePeriod=10 Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.444763 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whtqq\" (UniqueName: \"kubernetes.io/projected/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-kube-api-access-whtqq\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.444863 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-config-data\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.444886 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-scripts\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.444924 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-fernet-keys\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " 
pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.444946 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-combined-ca-bundle\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.444962 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-credential-keys\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.451021 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-scripts\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.453195 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-config-data\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.453653 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-fernet-keys\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.455522 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-combined-ca-bundle\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.460839 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whtqq\" (UniqueName: \"kubernetes.io/projected/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-kube-api-access-whtqq\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.470771 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-credential-keys\") pod \"keystone-bootstrap-6bzb6\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.631916 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.991642 4631 generic.go:334] "Generic (PLEG): container finished" podID="093fbd07-3966-4e87-bc8b-f007e875937f" containerID="0dad95f4d76c1be8f557c91bbee84bc400d1680b83812ca4f2bc9b81509f19ea" exitCode=0 Nov 29 04:30:24 crc kubenswrapper[4631]: I1129 04:30:24.991958 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-g5w92" event={"ID":"093fbd07-3966-4e87-bc8b-f007e875937f","Type":"ContainerDied","Data":"0dad95f4d76c1be8f557c91bbee84bc400d1680b83812ca4f2bc9b81509f19ea"} Nov 29 04:30:25 crc kubenswrapper[4631]: I1129 04:30:25.229498 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fe6877d-5adf-4b60-9a97-eb309b37fde9" path="/var/lib/kubelet/pods/4fe6877d-5adf-4b60-9a97-eb309b37fde9/volumes" Nov 29 04:30:27 crc kubenswrapper[4631]: I1129 04:30:27.034298 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"1c65add786a206a369928408e5b3380548439f0a5ab172c1e9af7c842248b15c"} Nov 29 04:30:28 crc kubenswrapper[4631]: I1129 04:30:28.205188 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-g5w92" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: connect: connection refused" Nov 29 04:30:33 crc kubenswrapper[4631]: I1129 04:30:33.204799 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-g5w92" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: connect: connection refused" Nov 29 04:30:38 crc kubenswrapper[4631]: I1129 04:30:38.205093 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-g5w92" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: connect: connection refused" Nov 29 04:30:38 crc kubenswrapper[4631]: I1129 04:30:38.205820 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:30:38 crc kubenswrapper[4631]: E1129 04:30:38.922301 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 29 04:30:38 crc kubenswrapper[4631]: E1129 04:30:38.922506 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n87h65h548hbdh6ch9fh6dh5f8h5d6hcbh546h8ch59ch7ch5f4h5cbh96h55dhdfh594h5c4h5c5h666h5b9h654h548h57ch79h654h577h5d6h5c5q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bq7rz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-58dfd7ff49-n5djz_openstack(9f926252-a86c-4c5b-a394-88c721681c4c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:30:38 crc kubenswrapper[4631]: E1129 04:30:38.929686 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-58dfd7ff49-n5djz" podUID="9f926252-a86c-4c5b-a394-88c721681c4c" Nov 29 04:30:38 crc kubenswrapper[4631]: E1129 04:30:38.939905 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 29 04:30:38 crc kubenswrapper[4631]: E1129 04:30:38.940042 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n9h54dh58h64dh68h5dch649h669h55fh654h687h69h59h5ffh68dh648h5f8h8dh56ch68h87hb4h7fh9hb5h54ch689h596h65fh54h59fh547q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mz7zg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-df4bd449c-d2k7v_openstack(25c11a81-f1b8-46c8-aed5-7875a37dbd06): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:30:38 crc kubenswrapper[4631]: E1129 04:30:38.952678 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-df4bd449c-d2k7v" podUID="25c11a81-f1b8-46c8-aed5-7875a37dbd06" Nov 29 04:30:38 crc kubenswrapper[4631]: E1129 04:30:38.953445 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 29 04:30:38 crc kubenswrapper[4631]: E1129 04:30:38.953581 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5cch5bdhf5h647h97h647h646h655h5d5h55bh645h567hdbh5b8h686h58h9dh94h5b5h56fh67h54fh558h575hdbh8ch5b8h566hdbh5f4h9dhc9q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5z5s6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-849868977-5t6pd_openstack(d194b14b-2b7c-4903-9361-e09887f4057f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:30:38 crc kubenswrapper[4631]: E1129 04:30:38.957063 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-849868977-5t6pd" podUID="d194b14b-2b7c-4903-9361-e09887f4057f" Nov 29 04:30:39 crc kubenswrapper[4631]: E1129 04:30:39.903285 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 29 04:30:39 crc kubenswrapper[4631]: E1129 04:30:39.903668 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r2h4b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-4zg2x_openstack(77e723d0-49bf-4008-bbce-7c1fe2ad3a5d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:30:39 crc kubenswrapper[4631]: E1129 04:30:39.906008 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-4zg2x" podUID="77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" Nov 29 04:30:40 crc kubenswrapper[4631]: I1129 04:30:40.176440 4631 generic.go:334] "Generic (PLEG): container finished" podID="c6cdce96-7bd4-45c6-9597-6196ceee67ef" containerID="9d113d2b9e9971ad81ce0e829fe55922228723302c79bc06b73da6244ca7728a" exitCode=0 Nov 29 04:30:40 crc kubenswrapper[4631]: I1129 04:30:40.176481 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-57lmq" event={"ID":"c6cdce96-7bd4-45c6-9597-6196ceee67ef","Type":"ContainerDied","Data":"9d113d2b9e9971ad81ce0e829fe55922228723302c79bc06b73da6244ca7728a"} Nov 29 04:30:40 crc kubenswrapper[4631]: E1129 04:30:40.180412 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-4zg2x" podUID="77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" Nov 29 04:30:40 crc kubenswrapper[4631]: E1129 04:30:40.307994 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 29 04:30:40 crc kubenswrapper[4631]: E1129 04:30:40.308936 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n547h555hcdh5fch677h64dh64ch68dh644h89h5b9h659h8bh55h7dh68bh667h595h65bhdfh5bch54fh7dhf4h5f4h8h5f6h69h85h576h685h97q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9nw56,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(782a0b9b-d16f-495e-a648-e8a03af1e2d2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:30:48 crc kubenswrapper[4631]: I1129 04:30:48.204635 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-g5w92" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: i/o timeout" Nov 29 04:30:49 crc kubenswrapper[4631]: I1129 04:30:49.271607 4631 generic.go:334] "Generic (PLEG): container finished" podID="30557b8c-2204-4118-a123-8fb42dc36b19" containerID="ae5b2bdd4d4687b9e130c1e5e7c365ad1d8e48fd67ea533d600227b1a2db6a1b" exitCode=0 Nov 29 04:30:49 crc kubenswrapper[4631]: I1129 04:30:49.271650 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7882p" event={"ID":"30557b8c-2204-4118-a123-8fb42dc36b19","Type":"ContainerDied","Data":"ae5b2bdd4d4687b9e130c1e5e7c365ad1d8e48fd67ea533d600227b1a2db6a1b"} Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.673043 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.701687 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.714855 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.717178 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.728415 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-57lmq" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.735791 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.814727 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-combined-ca-bundle\") pod \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.814767 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bq7rz\" (UniqueName: \"kubernetes.io/projected/9f926252-a86c-4c5b-a394-88c721681c4c-kube-api-access-bq7rz\") pod \"9f926252-a86c-4c5b-a394-88c721681c4c\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.814793 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-sb\") pod \"093fbd07-3966-4e87-bc8b-f007e875937f\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.814821 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25c11a81-f1b8-46c8-aed5-7875a37dbd06-logs\") pod \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.815156 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25c11a81-f1b8-46c8-aed5-7875a37dbd06-logs" (OuterVolumeSpecName: "logs") pod "25c11a81-f1b8-46c8-aed5-7875a37dbd06" (UID: "25c11a81-f1b8-46c8-aed5-7875a37dbd06"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.815242 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-dns-svc\") pod \"093fbd07-3966-4e87-bc8b-f007e875937f\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.815434 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/25c11a81-f1b8-46c8-aed5-7875a37dbd06-horizon-secret-key\") pod \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.815557 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5z5s6\" (UniqueName: \"kubernetes.io/projected/d194b14b-2b7c-4903-9361-e09887f4057f-kube-api-access-5z5s6\") pod \"d194b14b-2b7c-4903-9361-e09887f4057f\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816113 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-config\") pod \"093fbd07-3966-4e87-bc8b-f007e875937f\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816191 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpzxp\" (UniqueName: \"kubernetes.io/projected/30557b8c-2204-4118-a123-8fb42dc36b19-kube-api-access-hpzxp\") pod \"30557b8c-2204-4118-a123-8fb42dc36b19\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816225 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99w4p\" (UniqueName: \"kubernetes.io/projected/093fbd07-3966-4e87-bc8b-f007e875937f-kube-api-access-99w4p\") pod \"093fbd07-3966-4e87-bc8b-f007e875937f\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816250 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9f926252-a86c-4c5b-a394-88c721681c4c-horizon-secret-key\") pod \"9f926252-a86c-4c5b-a394-88c721681c4c\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816288 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-scripts\") pod \"d194b14b-2b7c-4903-9361-e09887f4057f\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816319 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzd87\" (UniqueName: \"kubernetes.io/projected/c6cdce96-7bd4-45c6-9597-6196ceee67ef-kube-api-access-zzd87\") pod \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816355 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-scripts\") pod \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\" 
(UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816387 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-config-data\") pod \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816412 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d194b14b-2b7c-4903-9361-e09887f4057f-logs\") pod \"d194b14b-2b7c-4903-9361-e09887f4057f\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816460 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d194b14b-2b7c-4903-9361-e09887f4057f-horizon-secret-key\") pod \"d194b14b-2b7c-4903-9361-e09887f4057f\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816500 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-nb\") pod \"093fbd07-3966-4e87-bc8b-f007e875937f\" (UID: \"093fbd07-3966-4e87-bc8b-f007e875937f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.816990 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-config-data\") pod \"9f926252-a86c-4c5b-a394-88c721681c4c\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.818039 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-config-data\") pod \"d194b14b-2b7c-4903-9361-e09887f4057f\" (UID: \"d194b14b-2b7c-4903-9361-e09887f4057f\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.818637 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-scripts" (OuterVolumeSpecName: "scripts") pod "9f926252-a86c-4c5b-a394-88c721681c4c" (UID: "9f926252-a86c-4c5b-a394-88c721681c4c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.820052 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-config-data" (OuterVolumeSpecName: "config-data") pod "9f926252-a86c-4c5b-a394-88c721681c4c" (UID: "9f926252-a86c-4c5b-a394-88c721681c4c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.821112 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-config-data" (OuterVolumeSpecName: "config-data") pod "d194b14b-2b7c-4903-9361-e09887f4057f" (UID: "d194b14b-2b7c-4903-9361-e09887f4057f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.818151 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-scripts\") pod \"9f926252-a86c-4c5b-a394-88c721681c4c\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.822553 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-combined-ca-bundle\") pod \"30557b8c-2204-4118-a123-8fb42dc36b19\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.822731 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-config\") pod \"30557b8c-2204-4118-a123-8fb42dc36b19\" (UID: \"30557b8c-2204-4118-a123-8fb42dc36b19\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.824570 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mz7zg\" (UniqueName: \"kubernetes.io/projected/25c11a81-f1b8-46c8-aed5-7875a37dbd06-kube-api-access-mz7zg\") pod \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.824612 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f926252-a86c-4c5b-a394-88c721681c4c-logs\") pod \"9f926252-a86c-4c5b-a394-88c721681c4c\" (UID: \"9f926252-a86c-4c5b-a394-88c721681c4c\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.824641 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-config-data\") pod \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\" (UID: \"25c11a81-f1b8-46c8-aed5-7875a37dbd06\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.824666 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-db-sync-config-data\") pod \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\" (UID: \"c6cdce96-7bd4-45c6-9597-6196ceee67ef\") " Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.824989 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d194b14b-2b7c-4903-9361-e09887f4057f-kube-api-access-5z5s6" (OuterVolumeSpecName: "kube-api-access-5z5s6") pod "d194b14b-2b7c-4903-9361-e09887f4057f" (UID: "d194b14b-2b7c-4903-9361-e09887f4057f"). InnerVolumeSpecName "kube-api-access-5z5s6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.825594 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5z5s6\" (UniqueName: \"kubernetes.io/projected/d194b14b-2b7c-4903-9361-e09887f4057f-kube-api-access-5z5s6\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.825624 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.825634 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.825667 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9f926252-a86c-4c5b-a394-88c721681c4c-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.825679 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25c11a81-f1b8-46c8-aed5-7875a37dbd06-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.828840 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f926252-a86c-4c5b-a394-88c721681c4c-kube-api-access-bq7rz" (OuterVolumeSpecName: "kube-api-access-bq7rz") pod "9f926252-a86c-4c5b-a394-88c721681c4c" (UID: "9f926252-a86c-4c5b-a394-88c721681c4c"). InnerVolumeSpecName "kube-api-access-bq7rz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.828916 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25c11a81-f1b8-46c8-aed5-7875a37dbd06-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "25c11a81-f1b8-46c8-aed5-7875a37dbd06" (UID: "25c11a81-f1b8-46c8-aed5-7875a37dbd06"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.829543 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-scripts" (OuterVolumeSpecName: "scripts") pod "25c11a81-f1b8-46c8-aed5-7875a37dbd06" (UID: "25c11a81-f1b8-46c8-aed5-7875a37dbd06"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.830103 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-config-data" (OuterVolumeSpecName: "config-data") pod "25c11a81-f1b8-46c8-aed5-7875a37dbd06" (UID: "25c11a81-f1b8-46c8-aed5-7875a37dbd06"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.830119 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f926252-a86c-4c5b-a394-88c721681c4c-logs" (OuterVolumeSpecName: "logs") pod "9f926252-a86c-4c5b-a394-88c721681c4c" (UID: "9f926252-a86c-4c5b-a394-88c721681c4c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.830971 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d194b14b-2b7c-4903-9361-e09887f4057f-logs" (OuterVolumeSpecName: "logs") pod "d194b14b-2b7c-4903-9361-e09887f4057f" (UID: "d194b14b-2b7c-4903-9361-e09887f4057f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.831256 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-scripts" (OuterVolumeSpecName: "scripts") pod "d194b14b-2b7c-4903-9361-e09887f4057f" (UID: "d194b14b-2b7c-4903-9361-e09887f4057f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.835526 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f926252-a86c-4c5b-a394-88c721681c4c-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "9f926252-a86c-4c5b-a394-88c721681c4c" (UID: "9f926252-a86c-4c5b-a394-88c721681c4c"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.836374 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30557b8c-2204-4118-a123-8fb42dc36b19-kube-api-access-hpzxp" (OuterVolumeSpecName: "kube-api-access-hpzxp") pod "30557b8c-2204-4118-a123-8fb42dc36b19" (UID: "30557b8c-2204-4118-a123-8fb42dc36b19"). InnerVolumeSpecName "kube-api-access-hpzxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.841239 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/093fbd07-3966-4e87-bc8b-f007e875937f-kube-api-access-99w4p" (OuterVolumeSpecName: "kube-api-access-99w4p") pod "093fbd07-3966-4e87-bc8b-f007e875937f" (UID: "093fbd07-3966-4e87-bc8b-f007e875937f"). InnerVolumeSpecName "kube-api-access-99w4p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.842557 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c6cdce96-7bd4-45c6-9597-6196ceee67ef" (UID: "c6cdce96-7bd4-45c6-9597-6196ceee67ef"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.842656 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25c11a81-f1b8-46c8-aed5-7875a37dbd06-kube-api-access-mz7zg" (OuterVolumeSpecName: "kube-api-access-mz7zg") pod "25c11a81-f1b8-46c8-aed5-7875a37dbd06" (UID: "25c11a81-f1b8-46c8-aed5-7875a37dbd06"). InnerVolumeSpecName "kube-api-access-mz7zg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.852531 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6cdce96-7bd4-45c6-9597-6196ceee67ef-kube-api-access-zzd87" (OuterVolumeSpecName: "kube-api-access-zzd87") pod "c6cdce96-7bd4-45c6-9597-6196ceee67ef" (UID: "c6cdce96-7bd4-45c6-9597-6196ceee67ef"). 
InnerVolumeSpecName "kube-api-access-zzd87". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.867383 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d194b14b-2b7c-4903-9361-e09887f4057f-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "d194b14b-2b7c-4903-9361-e09887f4057f" (UID: "d194b14b-2b7c-4903-9361-e09887f4057f"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.905879 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-config" (OuterVolumeSpecName: "config") pod "30557b8c-2204-4118-a123-8fb42dc36b19" (UID: "30557b8c-2204-4118-a123-8fb42dc36b19"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.925455 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6cdce96-7bd4-45c6-9597-6196ceee67ef" (UID: "c6cdce96-7bd4-45c6-9597-6196ceee67ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.926398 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "30557b8c-2204-4118-a123-8fb42dc36b19" (UID: "30557b8c-2204-4118-a123-8fb42dc36b19"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937602 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpzxp\" (UniqueName: \"kubernetes.io/projected/30557b8c-2204-4118-a123-8fb42dc36b19-kube-api-access-hpzxp\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937634 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99w4p\" (UniqueName: \"kubernetes.io/projected/093fbd07-3966-4e87-bc8b-f007e875937f-kube-api-access-99w4p\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937644 4631 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9f926252-a86c-4c5b-a394-88c721681c4c-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937657 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d194b14b-2b7c-4903-9361-e09887f4057f-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937667 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzd87\" (UniqueName: \"kubernetes.io/projected/c6cdce96-7bd4-45c6-9597-6196ceee67ef-kube-api-access-zzd87\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937676 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937684 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d194b14b-2b7c-4903-9361-e09887f4057f-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937692 4631 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d194b14b-2b7c-4903-9361-e09887f4057f-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937701 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937709 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/30557b8c-2204-4118-a123-8fb42dc36b19-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937718 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mz7zg\" (UniqueName: \"kubernetes.io/projected/25c11a81-f1b8-46c8-aed5-7875a37dbd06-kube-api-access-mz7zg\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937727 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f926252-a86c-4c5b-a394-88c721681c4c-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937736 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/25c11a81-f1b8-46c8-aed5-7875a37dbd06-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc 
kubenswrapper[4631]: I1129 04:30:52.937744 4631 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937752 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937760 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bq7rz\" (UniqueName: \"kubernetes.io/projected/9f926252-a86c-4c5b-a394-88c721681c4c-kube-api-access-bq7rz\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.937768 4631 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/25c11a81-f1b8-46c8-aed5-7875a37dbd06-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.944728 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "093fbd07-3966-4e87-bc8b-f007e875937f" (UID: "093fbd07-3966-4e87-bc8b-f007e875937f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.949497 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-config-data" (OuterVolumeSpecName: "config-data") pod "c6cdce96-7bd4-45c6-9597-6196ceee67ef" (UID: "c6cdce96-7bd4-45c6-9597-6196ceee67ef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.957725 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-config" (OuterVolumeSpecName: "config") pod "093fbd07-3966-4e87-bc8b-f007e875937f" (UID: "093fbd07-3966-4e87-bc8b-f007e875937f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.958035 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "093fbd07-3966-4e87-bc8b-f007e875937f" (UID: "093fbd07-3966-4e87-bc8b-f007e875937f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:52 crc kubenswrapper[4631]: I1129 04:30:52.964286 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "093fbd07-3966-4e87-bc8b-f007e875937f" (UID: "093fbd07-3966-4e87-bc8b-f007e875937f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.018900 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5964d597b6-rfcr2"] Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.039158 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6cdce96-7bd4-45c6-9597-6196ceee67ef-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.039181 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.039190 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.039377 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.039392 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/093fbd07-3966-4e87-bc8b-f007e875937f-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.205650 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-g5w92" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: i/o timeout" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.309808 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7882p" event={"ID":"30557b8c-2204-4118-a123-8fb42dc36b19","Type":"ContainerDied","Data":"76d82171d2dbb683de177577c5ecc5c68f6c5f76d232d651ad02ab30c8b1acb8"} Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.310114 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76d82171d2dbb683de177577c5ecc5c68f6c5f76d232d651ad02ab30c8b1acb8" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.310392 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7882p" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.316223 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-g5w92" event={"ID":"093fbd07-3966-4e87-bc8b-f007e875937f","Type":"ContainerDied","Data":"c080249d24027338138d511688594ad0eacf5913b5dcbd18e63e0ea28bfbdd96"} Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.316276 4631 scope.go:117] "RemoveContainer" containerID="0dad95f4d76c1be8f557c91bbee84bc400d1680b83812ca4f2bc9b81509f19ea" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.316422 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-g5w92" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.320123 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-849868977-5t6pd" event={"ID":"d194b14b-2b7c-4903-9361-e09887f4057f","Type":"ContainerDied","Data":"3ebe41b2120663e3a68a13a434ee18c7acebe4ec6e5f9911f71e47dfc140acbc"} Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.320272 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-849868977-5t6pd" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.326173 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-df4bd449c-d2k7v" event={"ID":"25c11a81-f1b8-46c8-aed5-7875a37dbd06","Type":"ContainerDied","Data":"73c5f557b838bdbff7516fee9215afd7a764355c0ce03de6578fce520256397e"} Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.326229 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-df4bd449c-d2k7v" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.334874 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-57lmq" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.334867 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-57lmq" event={"ID":"c6cdce96-7bd4-45c6-9597-6196ceee67ef","Type":"ContainerDied","Data":"f89561fe83ccabd9075f3ae746a2839314537ae1ba3e3f0cd7e9a31f8861f76a"} Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.335462 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f89561fe83ccabd9075f3ae746a2839314537ae1ba3e3f0cd7e9a31f8861f76a" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.339013 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58dfd7ff49-n5djz" event={"ID":"9f926252-a86c-4c5b-a394-88c721681c4c","Type":"ContainerDied","Data":"21bad1c752c351770fbc96734b189c996d76f68f5cdd7d3449db5ed9581b86f6"} Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.339119 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-58dfd7ff49-n5djz" Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.358881 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-g5w92"] Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.378380 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-g5w92"] Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.405553 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-df4bd449c-d2k7v"] Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.417567 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-df4bd449c-d2k7v"] Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.441932 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-849868977-5t6pd"] Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.452428 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-849868977-5t6pd"] Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.466783 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-58dfd7ff49-n5djz"] Nov 29 04:30:53 crc kubenswrapper[4631]: I1129 04:30:53.472125 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-58dfd7ff49-n5djz"] Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.088884 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54b684dc7c-ncd8n"] Nov 29 04:30:54 crc kubenswrapper[4631]: E1129 04:30:54.089350 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="dnsmasq-dns" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.089370 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="dnsmasq-dns" Nov 29 04:30:54 crc kubenswrapper[4631]: E1129 04:30:54.089417 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="init" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.089426 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="init" Nov 29 04:30:54 crc kubenswrapper[4631]: E1129 04:30:54.089439 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30557b8c-2204-4118-a123-8fb42dc36b19" containerName="neutron-db-sync" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.089449 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="30557b8c-2204-4118-a123-8fb42dc36b19" containerName="neutron-db-sync" Nov 29 04:30:54 crc kubenswrapper[4631]: E1129 04:30:54.089465 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6cdce96-7bd4-45c6-9597-6196ceee67ef" containerName="glance-db-sync" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.089473 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6cdce96-7bd4-45c6-9597-6196ceee67ef" containerName="glance-db-sync" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.089685 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" containerName="dnsmasq-dns" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.089715 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="30557b8c-2204-4118-a123-8fb42dc36b19" containerName="neutron-db-sync" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.089755 4631 
memory_manager.go:354] "RemoveStaleState removing state" podUID="c6cdce96-7bd4-45c6-9597-6196ceee67ef" containerName="glance-db-sync" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.090883 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.126438 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54b684dc7c-ncd8n"] Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.169078 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bltn\" (UniqueName: \"kubernetes.io/projected/cc354305-e26e-45d9-9146-8f62b897f1ac-kube-api-access-5bltn\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.169137 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-config\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.169176 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-dns-svc\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.169438 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-sb\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.169502 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-nb\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.270779 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-sb\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.270846 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-nb\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.270916 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bltn\" (UniqueName: \"kubernetes.io/projected/cc354305-e26e-45d9-9146-8f62b897f1ac-kube-api-access-5bltn\") 
pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.270937 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-config\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.270962 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-dns-svc\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.271825 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-dns-svc\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.272317 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-sb\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.272934 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-config\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.273145 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-nb\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.325709 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bltn\" (UniqueName: \"kubernetes.io/projected/cc354305-e26e-45d9-9146-8f62b897f1ac-kube-api-access-5bltn\") pod \"dnsmasq-dns-54b684dc7c-ncd8n\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.385484 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7f5f7c4c6d-j949k"] Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.429501 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.437189 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f5f7c4c6d-j949k"] Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.437304 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.441546 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.441774 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.441988 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-7gknv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.442129 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.516024 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54b684dc7c-ncd8n"] Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.549974 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fb745b69-cw9bv"] Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.551601 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.577343 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-cw9bv"] Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.584833 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-config\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.584937 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ccb7\" (UniqueName: \"kubernetes.io/projected/61789a4f-3d8f-44c8-b8ea-5d43da626439-kube-api-access-6ccb7\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.584969 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-combined-ca-bundle\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.585039 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-httpd-config\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.585091 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-ovndb-tls-certs\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.686926 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-6ccb7\" (UniqueName: \"kubernetes.io/projected/61789a4f-3d8f-44c8-b8ea-5d43da626439-kube-api-access-6ccb7\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.687236 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-combined-ca-bundle\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.687298 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-httpd-config\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.687353 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-sb\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.687385 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-ovndb-tls-certs\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.687412 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-config\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.687433 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-dns-svc\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.687457 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-nb\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.687478 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4t6z6\" (UniqueName: \"kubernetes.io/projected/7b961caf-9678-485f-be86-93f60808ac4d-kube-api-access-4t6z6\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.687511 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-config\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.694065 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-combined-ca-bundle\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.696009 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-ovndb-tls-certs\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.702411 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-httpd-config\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.723042 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-config\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.724944 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ccb7\" (UniqueName: \"kubernetes.io/projected/61789a4f-3d8f-44c8-b8ea-5d43da626439-kube-api-access-6ccb7\") pod \"neutron-7f5f7c4c6d-j949k\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.772486 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.790256 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-sb\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.790347 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-dns-svc\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.790377 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-nb\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.790398 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4t6z6\" (UniqueName: \"kubernetes.io/projected/7b961caf-9678-485f-be86-93f60808ac4d-kube-api-access-4t6z6\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:54 crc kubenswrapper[4631]: I1129 04:30:54.790431 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-config\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:54.791634 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-config\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:54.792901 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-nb\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:54.793225 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-sb\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:54.794467 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-dns-svc\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:54.811630 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4t6z6\" (UniqueName: \"kubernetes.io/projected/7b961caf-9678-485f-be86-93f60808ac4d-kube-api-access-4t6z6\") pod \"dnsmasq-dns-fb745b69-cw9bv\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:54.870958 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.227611 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="093fbd07-3966-4e87-bc8b-f007e875937f" path="/var/lib/kubelet/pods/093fbd07-3966-4e87-bc8b-f007e875937f/volumes" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.228592 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25c11a81-f1b8-46c8-aed5-7875a37dbd06" path="/var/lib/kubelet/pods/25c11a81-f1b8-46c8-aed5-7875a37dbd06/volumes" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.229007 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f926252-a86c-4c5b-a394-88c721681c4c" path="/var/lib/kubelet/pods/9f926252-a86c-4c5b-a394-88c721681c4c/volumes" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.229407 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d194b14b-2b7c-4903-9361-e09887f4057f" path="/var/lib/kubelet/pods/d194b14b-2b7c-4903-9361-e09887f4057f/volumes" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.321260 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.322893 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.325291 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.325452 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qmjvc" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.326699 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.333710 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.398840 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t56jh\" (UniqueName: \"kubernetes.io/projected/448a8237-ee30-4d73-86be-d12a309691f5-kube-api-access-t56jh\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.398899 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.399279 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-config-data\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.399390 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.399418 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-logs\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.399436 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.399454 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-scripts\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.501081 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-config-data\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.501136 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.501163 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-logs\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.501178 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.501196 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-scripts\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.501223 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t56jh\" (UniqueName: \"kubernetes.io/projected/448a8237-ee30-4d73-86be-d12a309691f5-kube-api-access-t56jh\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.501250 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.501720 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.503907 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-logs\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.503984 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.509502 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-config-data\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.510044 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.522819 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-scripts\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.531492 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: 
\"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.532751 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t56jh\" (UniqueName: \"kubernetes.io/projected/448a8237-ee30-4d73-86be-d12a309691f5-kube-api-access-t56jh\") pod \"glance-default-external-api-0\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.642352 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: E1129 04:30:55.718967 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 29 04:30:55 crc kubenswrapper[4631]: E1129 04:30:55.720118 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m2fgr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-cztsz_openstack(d646890b-5054-4ad5-9dc0-940a5e397fd0): ErrImagePull: rpc 
error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:30:55 crc kubenswrapper[4631]: E1129 04:30:55.721314 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-cztsz" podUID="d646890b-5054-4ad5-9dc0-940a5e397fd0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.769132 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.770830 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.778552 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:30:55 crc kubenswrapper[4631]: W1129 04:30:55.783655 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2a6410f_6c69_4b87_a247_b285aef98b71.slice/crio-212ae4cdb5e9b644a285132160beedefa8bb4b1fc2cb38fa8cfd0ec74628f493 WatchSource:0}: Error finding container 212ae4cdb5e9b644a285132160beedefa8bb4b1fc2cb38fa8cfd0ec74628f493: Status 404 returned error can't find the container with id 212ae4cdb5e9b644a285132160beedefa8bb4b1fc2cb38fa8cfd0ec74628f493 Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.783992 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.807061 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dh497\" (UniqueName: \"kubernetes.io/projected/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-kube-api-access-dh497\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.807112 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.807150 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-logs\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.807196 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.807224 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") 
pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.807247 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.816186 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:55 crc kubenswrapper[4631]: I1129 04:30:55.820288 4631 scope.go:117] "RemoveContainer" containerID="20fb52cd87cc20708af976220cd7b202ca9e41f53aa76a84e56bb66a8ff19bb7" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.917991 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.918059 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dh497\" (UniqueName: \"kubernetes.io/projected/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-kube-api-access-dh497\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.918113 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.918457 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-logs\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.918538 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.919369 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-logs\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.919370 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" 
(UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.919728 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.920102 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.920143 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.931307 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.931732 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.943730 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dh497\" (UniqueName: \"kubernetes.io/projected/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-kube-api-access-dh497\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.955087 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:55.967282 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:56.138427 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-76fdc69464-qvs2b"] Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:56.147026 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:56.386605 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76fdc69464-qvs2b" event={"ID":"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14","Type":"ContainerStarted","Data":"cb412ed9a6866664db61f196fb7f78357a51c0fb01b19d75336d0942e33b7106"} Nov 29 04:30:56 crc kubenswrapper[4631]: I1129 04:30:56.387781 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5964d597b6-rfcr2" event={"ID":"e2a6410f-6c69-4b87-a247-b285aef98b71","Type":"ContainerStarted","Data":"212ae4cdb5e9b644a285132160beedefa8bb4b1fc2cb38fa8cfd0ec74628f493"} Nov 29 04:30:56 crc kubenswrapper[4631]: E1129 04:30:56.423926 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-cztsz" podUID="d646890b-5054-4ad5-9dc0-940a5e397fd0" Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.020680 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6bzb6"] Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.166031 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.447819 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.466772 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54b684dc7c-ncd8n"] Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.472862 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-cw9bv"] Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.508419 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jmcnk" event={"ID":"8a612579-d131-4dbd-85bc-ba455a26db3b","Type":"ContainerStarted","Data":"d30c2c6a7b2349da3e750e196662bfa5b0180b9be3c541dc573adad9713b9cdb"} Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.510619 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6bzb6" event={"ID":"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed","Type":"ContainerStarted","Data":"c0a8467046c1c09cb667c26e37e33c49934cfbe4e16af9a5c220931462c99b5d"} Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.514466 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5964d597b6-rfcr2" event={"ID":"e2a6410f-6c69-4b87-a247-b285aef98b71","Type":"ContainerStarted","Data":"e50d02c1a4b790d0f5a01851f155895de0f28b91699fc6245f4ee851749d5943"} Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.515817 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"448a8237-ee30-4d73-86be-d12a309691f5","Type":"ContainerStarted","Data":"4e50ed2388f9b1fa2a40d1d288783f52e2c92bead667157277d771822601f304"} Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.532434 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-jmcnk" podStartSLOduration=7.335102941 podStartE2EDuration="44.532421051s" podCreationTimestamp="2025-11-29 04:30:13 +0000 UTC" firstStartedPulling="2025-11-29 04:30:15.281705891 +0000 UTC m=+1142.346209405" 
lastFinishedPulling="2025-11-29 04:30:52.479023991 +0000 UTC m=+1179.543527515" observedRunningTime="2025-11-29 04:30:57.522077879 +0000 UTC m=+1184.586581393" watchObservedRunningTime="2025-11-29 04:30:57.532421051 +0000 UTC m=+1184.596924565" Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.537387 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"ec77098f5447ae8c296a8afb9791e43d27e002e4c07c84ecf65e789696f0da34"} Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.539399 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"7d16b12cc15875b911e711696eed3b1580fd77608958b8996d5daef2597775e2"} Nov 29 04:30:57 crc kubenswrapper[4631]: I1129 04:30:57.555823 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f5f7c4c6d-j949k"] Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.559588 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e","Type":"ContainerStarted","Data":"b2905cfbb27132258fc33017455de1e7e4a603b93de37574fd5f2404b7bbd25a"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.567678 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5964d597b6-rfcr2" event={"ID":"e2a6410f-6c69-4b87-a247-b285aef98b71","Type":"ContainerStarted","Data":"cbbb99ecfc0bc493701045852b7269c4bcff1a9e60435681b409c45bbed38032"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.580010 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-4zg2x" event={"ID":"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d","Type":"ContainerStarted","Data":"bc62497bf619cacb0fdf8dcd7974505a74ab66a52fdbe5b93e3b60a31820e5e5"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.585888 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6bzb6" event={"ID":"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed","Type":"ContainerStarted","Data":"9bdb5e8368f5ef2303ceaf6ab54c3259104ac54a2908cf05b3fff429ecb41a28"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.593482 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76fdc69464-qvs2b" event={"ID":"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14","Type":"ContainerStarted","Data":"8b44c75824934f7b87d14cf3fa1b963da97caaa8b8cf8a2df430157835986df1"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.593509 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76fdc69464-qvs2b" event={"ID":"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14","Type":"ContainerStarted","Data":"b8821366a8d7bbbc829496fae08b6db06349f21d4ce52f873b767b5bf290f050"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.599192 4631 generic.go:334] "Generic (PLEG): container finished" podID="7b961caf-9678-485f-be86-93f60808ac4d" containerID="23e12f2f4f3b6506fe8fbad410a913f052575bba2d2e514f1581daec25561142" exitCode=0 Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.599268 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" event={"ID":"7b961caf-9678-485f-be86-93f60808ac4d","Type":"ContainerDied","Data":"23e12f2f4f3b6506fe8fbad410a913f052575bba2d2e514f1581daec25561142"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.599314 4631 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" event={"ID":"7b961caf-9678-485f-be86-93f60808ac4d","Type":"ContainerStarted","Data":"1b270147ec17d6908ecce2d9c567772e4cb0c44b171ddd8ae2f0254c87fa7ba4"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.601630 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f5f7c4c6d-j949k" event={"ID":"61789a4f-3d8f-44c8-b8ea-5d43da626439","Type":"ContainerStarted","Data":"ad95cea063e9d485d805de7dd1ad46aaf52c15e5c59f83e5f29be1e83c4ecbfc"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.601657 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f5f7c4c6d-j949k" event={"ID":"61789a4f-3d8f-44c8-b8ea-5d43da626439","Type":"ContainerStarted","Data":"90a5fb10ab91b1634b133da54f98fa13401d6aa2696e4adb7fb41a846f8f9032"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.604285 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5964d597b6-rfcr2" podStartSLOduration=35.755764963 podStartE2EDuration="36.604268525s" podCreationTimestamp="2025-11-29 04:30:22 +0000 UTC" firstStartedPulling="2025-11-29 04:30:55.819754916 +0000 UTC m=+1182.884258430" lastFinishedPulling="2025-11-29 04:30:56.668258488 +0000 UTC m=+1183.732761992" observedRunningTime="2025-11-29 04:30:58.598921805 +0000 UTC m=+1185.663425309" watchObservedRunningTime="2025-11-29 04:30:58.604268525 +0000 UTC m=+1185.668772039" Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.616749 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"782a0b9b-d16f-495e-a648-e8a03af1e2d2","Type":"ContainerStarted","Data":"ae3a0d81ae39a49fb920c6b1e172a23981afa1366441f0a3feacc58a766e9c20"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.643782 4631 generic.go:334] "Generic (PLEG): container finished" podID="cc354305-e26e-45d9-9146-8f62b897f1ac" containerID="f81a682b364e4c0d35c8129e68c0f68fc732704e6d6a2ec9b3c8a815f31c06e9" exitCode=0 Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.643861 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" event={"ID":"cc354305-e26e-45d9-9146-8f62b897f1ac","Type":"ContainerDied","Data":"f81a682b364e4c0d35c8129e68c0f68fc732704e6d6a2ec9b3c8a815f31c06e9"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.643887 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" event={"ID":"cc354305-e26e-45d9-9146-8f62b897f1ac","Type":"ContainerStarted","Data":"5b4cf6751a7b8a422b2db924e32c226c13ad8555ec0557ab0bc34710a0b049a8"} Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.648775 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-6bzb6" podStartSLOduration=34.648760479 podStartE2EDuration="34.648760479s" podCreationTimestamp="2025-11-29 04:30:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:30:58.643510281 +0000 UTC m=+1185.708013815" watchObservedRunningTime="2025-11-29 04:30:58.648760479 +0000 UTC m=+1185.713263993" Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.671926 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-76fdc69464-qvs2b" podStartSLOduration=35.953010321 podStartE2EDuration="36.671907493s" podCreationTimestamp="2025-11-29 04:30:22 +0000 UTC" firstStartedPulling="2025-11-29 04:30:56.373657127 
+0000 UTC m=+1183.438160641" lastFinishedPulling="2025-11-29 04:30:57.092554299 +0000 UTC m=+1184.157057813" observedRunningTime="2025-11-29 04:30:58.670746525 +0000 UTC m=+1185.735250039" watchObservedRunningTime="2025-11-29 04:30:58.671907493 +0000 UTC m=+1185.736411007" Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.711472 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-4zg2x" podStartSLOduration=5.654157082 podStartE2EDuration="45.711443857s" podCreationTimestamp="2025-11-29 04:30:13 +0000 UTC" firstStartedPulling="2025-11-29 04:30:16.648636461 +0000 UTC m=+1143.713139975" lastFinishedPulling="2025-11-29 04:30:56.705923236 +0000 UTC m=+1183.770426750" observedRunningTime="2025-11-29 04:30:58.69475794 +0000 UTC m=+1185.759261454" watchObservedRunningTime="2025-11-29 04:30:58.711443857 +0000 UTC m=+1185.775947371" Nov 29 04:30:58 crc kubenswrapper[4631]: I1129 04:30:58.721321 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"874bb6b3-16cb-4d17-bf8b-6d3593d727d0","Type":"ContainerStarted","Data":"69e6e67414059a591eb331d63e43667ab3b19393e67eb4ac5f2c220a1ccedd9f"} Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.020635 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=80.976415044 podStartE2EDuration="1m32.020617323s" podCreationTimestamp="2025-11-29 04:29:27 +0000 UTC" firstStartedPulling="2025-11-29 04:30:05.733024612 +0000 UTC m=+1132.797528126" lastFinishedPulling="2025-11-29 04:30:16.777226891 +0000 UTC m=+1143.841730405" observedRunningTime="2025-11-29 04:30:58.805019888 +0000 UTC m=+1185.869523392" watchObservedRunningTime="2025-11-29 04:30:59.020617323 +0000 UTC m=+1186.085120837" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.069071 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.150666 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.241520 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-cw9bv"] Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.293028 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xrtmb"] Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.294451 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.299498 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.307190 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xrtmb"] Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.437222 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.464039 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.464450 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-config\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.464578 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.464727 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtjpp\" (UniqueName: \"kubernetes.io/projected/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-kube-api-access-dtjpp\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.464821 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.464929 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-svc\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.566681 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-sb\") pod \"cc354305-e26e-45d9-9146-8f62b897f1ac\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.567574 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bltn\" (UniqueName: \"kubernetes.io/projected/cc354305-e26e-45d9-9146-8f62b897f1ac-kube-api-access-5bltn\") pod \"cc354305-e26e-45d9-9146-8f62b897f1ac\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.567805 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-dns-svc\") pod \"cc354305-e26e-45d9-9146-8f62b897f1ac\" (UID: 
\"cc354305-e26e-45d9-9146-8f62b897f1ac\") " Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.567853 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-config\") pod \"cc354305-e26e-45d9-9146-8f62b897f1ac\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.567928 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-nb\") pod \"cc354305-e26e-45d9-9146-8f62b897f1ac\" (UID: \"cc354305-e26e-45d9-9146-8f62b897f1ac\") " Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.568250 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtjpp\" (UniqueName: \"kubernetes.io/projected/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-kube-api-access-dtjpp\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.568296 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.568363 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-svc\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.568461 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.568498 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-config\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.568536 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.569198 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.569438 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.569777 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-svc\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.569985 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-config\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.571137 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.599658 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc354305-e26e-45d9-9146-8f62b897f1ac-kube-api-access-5bltn" (OuterVolumeSpecName: "kube-api-access-5bltn") pod "cc354305-e26e-45d9-9146-8f62b897f1ac" (UID: "cc354305-e26e-45d9-9146-8f62b897f1ac"). InnerVolumeSpecName "kube-api-access-5bltn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.603416 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cc354305-e26e-45d9-9146-8f62b897f1ac" (UID: "cc354305-e26e-45d9-9146-8f62b897f1ac"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.610414 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-config" (OuterVolumeSpecName: "config") pod "cc354305-e26e-45d9-9146-8f62b897f1ac" (UID: "cc354305-e26e-45d9-9146-8f62b897f1ac"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.611239 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtjpp\" (UniqueName: \"kubernetes.io/projected/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-kube-api-access-dtjpp\") pod \"dnsmasq-dns-55f844cf75-xrtmb\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.622005 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cc354305-e26e-45d9-9146-8f62b897f1ac" (UID: "cc354305-e26e-45d9-9146-8f62b897f1ac"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.668559 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cc354305-e26e-45d9-9146-8f62b897f1ac" (UID: "cc354305-e26e-45d9-9146-8f62b897f1ac"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.669636 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.669655 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.669667 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.669677 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bltn\" (UniqueName: \"kubernetes.io/projected/cc354305-e26e-45d9-9146-8f62b897f1ac-kube-api-access-5bltn\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.669688 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc354305-e26e-45d9-9146-8f62b897f1ac-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.673013 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.775179 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"448a8237-ee30-4d73-86be-d12a309691f5","Type":"ContainerStarted","Data":"1f0e7c5628c5abeb1c044c7a3298116d52aa1c9695542db074d5dfe620ed98cc"} Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.781092 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.783128 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54b684dc7c-ncd8n" event={"ID":"cc354305-e26e-45d9-9146-8f62b897f1ac","Type":"ContainerDied","Data":"5b4cf6751a7b8a422b2db924e32c226c13ad8555ec0557ab0bc34710a0b049a8"} Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.783241 4631 scope.go:117] "RemoveContainer" containerID="f81a682b364e4c0d35c8129e68c0f68fc732704e6d6a2ec9b3c8a815f31c06e9" Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.817300 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" event={"ID":"7b961caf-9678-485f-be86-93f60808ac4d","Type":"ContainerStarted","Data":"27f94d835a6d07e7dd01988c209a4b9280ebba417542ab3d939c3a735c8ebcb2"} Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.876777 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54b684dc7c-ncd8n"] Nov 29 04:30:59 crc kubenswrapper[4631]: I1129 04:30:59.889978 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54b684dc7c-ncd8n"] Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.351760 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xrtmb"] Nov 29 04:31:00 crc kubenswrapper[4631]: W1129 04:31:00.359431 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5fcf4605_b6e4_4d37_9cbf_6f1daa0f32d6.slice/crio-6394df9db4bed55b5f061381658da7a90b69536767f0aa1f428696e92e5853e2 WatchSource:0}: Error finding container 6394df9db4bed55b5f061381658da7a90b69536767f0aa1f428696e92e5853e2: Status 404 returned error can't find the container with id 6394df9db4bed55b5f061381658da7a90b69536767f0aa1f428696e92e5853e2 Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.639873 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7d7446b849-nsq65"] Nov 29 04:31:00 crc kubenswrapper[4631]: E1129 04:31:00.640278 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc354305-e26e-45d9-9146-8f62b897f1ac" containerName="init" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.640288 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc354305-e26e-45d9-9146-8f62b897f1ac" containerName="init" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.640470 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc354305-e26e-45d9-9146-8f62b897f1ac" containerName="init" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.641368 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.647942 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.649800 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.652811 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7d7446b849-nsq65"] Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.798459 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-public-tls-certs\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.798933 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgcbm\" (UniqueName: \"kubernetes.io/projected/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-kube-api-access-jgcbm\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.798962 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-combined-ca-bundle\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.798983 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-ovndb-tls-certs\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.799009 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-config\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.799054 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-httpd-config\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.799070 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-internal-tls-certs\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.832287 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" 
event={"ID":"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6","Type":"ContainerStarted","Data":"6394df9db4bed55b5f061381658da7a90b69536767f0aa1f428696e92e5853e2"} Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.901060 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-httpd-config\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.901117 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-internal-tls-certs\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.901210 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-public-tls-certs\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.901263 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgcbm\" (UniqueName: \"kubernetes.io/projected/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-kube-api-access-jgcbm\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.901288 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-combined-ca-bundle\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.901308 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-ovndb-tls-certs\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.901388 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-config\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.908423 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-public-tls-certs\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.908497 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-ovndb-tls-certs\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " 
pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.908965 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-internal-tls-certs\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.909136 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-config\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.913021 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-combined-ca-bundle\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.914891 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-httpd-config\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.918512 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgcbm\" (UniqueName: \"kubernetes.io/projected/7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9-kube-api-access-jgcbm\") pod \"neutron-7d7446b849-nsq65\" (UID: \"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9\") " pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:00 crc kubenswrapper[4631]: I1129 04:31:00.998301 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.250930 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc354305-e26e-45d9-9146-8f62b897f1ac" path="/var/lib/kubelet/pods/cc354305-e26e-45d9-9146-8f62b897f1ac/volumes" Nov 29 04:31:01 crc kubenswrapper[4631]: E1129 04:31:01.638120 4631 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5fcf4605_b6e4_4d37_9cbf_6f1daa0f32d6.slice/crio-adf289c7ed6772c8bf242408d1f93ee804fe13655763d22ac04eec6184e08d6a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5fcf4605_b6e4_4d37_9cbf_6f1daa0f32d6.slice/crio-conmon-adf289c7ed6772c8bf242408d1f93ee804fe13655763d22ac04eec6184e08d6a.scope\": RecentStats: unable to find data in memory cache]" Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.799278 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7d7446b849-nsq65"] Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.878709 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f5f7c4c6d-j949k" event={"ID":"61789a4f-3d8f-44c8-b8ea-5d43da626439","Type":"ContainerStarted","Data":"cd7994f5257d63f63b3dec09c5413ad86d9a8349949a5d6affeb80dcec240d51"} Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.880612 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d7446b849-nsq65" event={"ID":"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9","Type":"ContainerStarted","Data":"a3603c0b56519c96680ea29b7903fa2c7e5e5a3c1a43e34d39bf8bad54a9e653"} Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.882670 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e","Type":"ContainerStarted","Data":"74aceef224d1bc0faf362ff7a3cddf05a5ffbca937eb9e41c36c17fff2fe2f28"} Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.903661 4631 generic.go:334] "Generic (PLEG): container finished" podID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerID="adf289c7ed6772c8bf242408d1f93ee804fe13655763d22ac04eec6184e08d6a" exitCode=0 Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.903808 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" podUID="7b961caf-9678-485f-be86-93f60808ac4d" containerName="dnsmasq-dns" containerID="cri-o://27f94d835a6d07e7dd01988c209a4b9280ebba417542ab3d939c3a735c8ebcb2" gracePeriod=10 Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.904701 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" event={"ID":"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6","Type":"ContainerDied","Data":"adf289c7ed6772c8bf242408d1f93ee804fe13655763d22ac04eec6184e08d6a"} Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.904747 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:31:01 crc kubenswrapper[4631]: I1129 04:31:01.933161 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" podStartSLOduration=7.933141752 podStartE2EDuration="7.933141752s" podCreationTimestamp="2025-11-29 04:30:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:01.930370354 +0000 UTC m=+1188.994873858" watchObservedRunningTime="2025-11-29 04:31:01.933141752 +0000 UTC m=+1188.997645256" Nov 29 04:31:02 crc kubenswrapper[4631]: I1129 04:31:02.914639 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e","Type":"ContainerStarted","Data":"b500372af6768b07352e19c5b10fe93ded6ebd6317710f179e8461060ac68bb2"} Nov 29 04:31:02 crc kubenswrapper[4631]: I1129 04:31:02.916975 4631 generic.go:334] "Generic (PLEG): container finished" podID="7b961caf-9678-485f-be86-93f60808ac4d" containerID="27f94d835a6d07e7dd01988c209a4b9280ebba417542ab3d939c3a735c8ebcb2" exitCode=0 Nov 29 04:31:02 crc kubenswrapper[4631]: I1129 04:31:02.917043 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" event={"ID":"7b961caf-9678-485f-be86-93f60808ac4d","Type":"ContainerDied","Data":"27f94d835a6d07e7dd01988c209a4b9280ebba417542ab3d939c3a735c8ebcb2"} Nov 29 04:31:02 crc kubenswrapper[4631]: I1129 04:31:02.919987 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"448a8237-ee30-4d73-86be-d12a309691f5","Type":"ContainerStarted","Data":"49725203c0021296d10c62ed7982323191106e31499fd25f3b65b6b57390de26"} Nov 29 04:31:02 crc kubenswrapper[4631]: I1129 04:31:02.920057 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:31:02 crc kubenswrapper[4631]: I1129 04:31:02.920118 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="448a8237-ee30-4d73-86be-d12a309691f5" containerName="glance-log" containerID="cri-o://1f0e7c5628c5abeb1c044c7a3298116d52aa1c9695542db074d5dfe620ed98cc" gracePeriod=30 Nov 29 04:31:02 crc kubenswrapper[4631]: I1129 04:31:02.920156 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="448a8237-ee30-4d73-86be-d12a309691f5" containerName="glance-httpd" containerID="cri-o://49725203c0021296d10c62ed7982323191106e31499fd25f3b65b6b57390de26" gracePeriod=30 Nov 29 04:31:02 crc kubenswrapper[4631]: I1129 04:31:02.940113 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7f5f7c4c6d-j949k" podStartSLOduration=8.940093825 podStartE2EDuration="8.940093825s" podCreationTimestamp="2025-11-29 04:30:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:02.934419516 +0000 UTC m=+1189.998923020" watchObservedRunningTime="2025-11-29 04:31:02.940093825 +0000 UTC m=+1190.004597339" Nov 29 04:31:02 crc kubenswrapper[4631]: I1129 04:31:02.957506 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.957475568 podStartE2EDuration="8.957475568s" podCreationTimestamp="2025-11-29 04:30:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:02.955132101 +0000 UTC m=+1190.019635605" watchObservedRunningTime="2025-11-29 04:31:02.957475568 +0000 UTC m=+1190.021979082" Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.142157 4631 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.142208 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.265770 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.266774 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.932523 4631 generic.go:334] "Generic (PLEG): container finished" podID="448a8237-ee30-4d73-86be-d12a309691f5" containerID="49725203c0021296d10c62ed7982323191106e31499fd25f3b65b6b57390de26" exitCode=0 Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.932682 4631 generic.go:334] "Generic (PLEG): container finished" podID="448a8237-ee30-4d73-86be-d12a309691f5" containerID="1f0e7c5628c5abeb1c044c7a3298116d52aa1c9695542db074d5dfe620ed98cc" exitCode=143 Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.933521 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"448a8237-ee30-4d73-86be-d12a309691f5","Type":"ContainerDied","Data":"49725203c0021296d10c62ed7982323191106e31499fd25f3b65b6b57390de26"} Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.933546 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"448a8237-ee30-4d73-86be-d12a309691f5","Type":"ContainerDied","Data":"1f0e7c5628c5abeb1c044c7a3298116d52aa1c9695542db074d5dfe620ed98cc"} Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.933781 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerName="glance-log" containerID="cri-o://74aceef224d1bc0faf362ff7a3cddf05a5ffbca937eb9e41c36c17fff2fe2f28" gracePeriod=30 Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.934393 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerName="glance-httpd" containerID="cri-o://b500372af6768b07352e19c5b10fe93ded6ebd6317710f179e8461060ac68bb2" gracePeriod=30 Nov 29 04:31:03 crc kubenswrapper[4631]: I1129 04:31:03.963060 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.963044778 podStartE2EDuration="9.963044778s" podCreationTimestamp="2025-11-29 04:30:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:03.959595144 +0000 UTC m=+1191.024098658" watchObservedRunningTime="2025-11-29 04:31:03.963044778 +0000 UTC m=+1191.027548292" Nov 29 04:31:04 crc kubenswrapper[4631]: I1129 04:31:04.873598 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" podUID="7b961caf-9678-485f-be86-93f60808ac4d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.148:5353: connect: connection refused" Nov 29 04:31:04 crc kubenswrapper[4631]: I1129 04:31:04.944556 4631 generic.go:334] "Generic (PLEG): container finished" podID="8a612579-d131-4dbd-85bc-ba455a26db3b" 
containerID="d30c2c6a7b2349da3e750e196662bfa5b0180b9be3c541dc573adad9713b9cdb" exitCode=0 Nov 29 04:31:04 crc kubenswrapper[4631]: I1129 04:31:04.944613 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jmcnk" event={"ID":"8a612579-d131-4dbd-85bc-ba455a26db3b","Type":"ContainerDied","Data":"d30c2c6a7b2349da3e750e196662bfa5b0180b9be3c541dc573adad9713b9cdb"} Nov 29 04:31:04 crc kubenswrapper[4631]: I1129 04:31:04.949877 4631 generic.go:334] "Generic (PLEG): container finished" podID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerID="b500372af6768b07352e19c5b10fe93ded6ebd6317710f179e8461060ac68bb2" exitCode=0 Nov 29 04:31:04 crc kubenswrapper[4631]: I1129 04:31:04.949910 4631 generic.go:334] "Generic (PLEG): container finished" podID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerID="74aceef224d1bc0faf362ff7a3cddf05a5ffbca937eb9e41c36c17fff2fe2f28" exitCode=143 Nov 29 04:31:04 crc kubenswrapper[4631]: I1129 04:31:04.949931 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e","Type":"ContainerDied","Data":"b500372af6768b07352e19c5b10fe93ded6ebd6317710f179e8461060ac68bb2"} Nov 29 04:31:04 crc kubenswrapper[4631]: I1129 04:31:04.949955 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e","Type":"ContainerDied","Data":"74aceef224d1bc0faf362ff7a3cddf05a5ffbca937eb9e41c36c17fff2fe2f28"} Nov 29 04:31:05 crc kubenswrapper[4631]: I1129 04:31:05.969697 4631 generic.go:334] "Generic (PLEG): container finished" podID="4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" containerID="9bdb5e8368f5ef2303ceaf6ab54c3259104ac54a2908cf05b3fff429ecb41a28" exitCode=0 Nov 29 04:31:05 crc kubenswrapper[4631]: I1129 04:31:05.969958 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6bzb6" event={"ID":"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed","Type":"ContainerDied","Data":"9bdb5e8368f5ef2303ceaf6ab54c3259104ac54a2908cf05b3fff429ecb41a28"} Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.744074 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-jmcnk" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.792001 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.830442 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a612579-d131-4dbd-85bc-ba455a26db3b-logs\") pod \"8a612579-d131-4dbd-85bc-ba455a26db3b\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.830506 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-combined-ca-bundle\") pod \"8a612579-d131-4dbd-85bc-ba455a26db3b\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.830570 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-config-data\") pod \"8a612579-d131-4dbd-85bc-ba455a26db3b\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.830711 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-scripts\") pod \"8a612579-d131-4dbd-85bc-ba455a26db3b\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.830762 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vcbk2\" (UniqueName: \"kubernetes.io/projected/8a612579-d131-4dbd-85bc-ba455a26db3b-kube-api-access-vcbk2\") pod \"8a612579-d131-4dbd-85bc-ba455a26db3b\" (UID: \"8a612579-d131-4dbd-85bc-ba455a26db3b\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.837493 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a612579-d131-4dbd-85bc-ba455a26db3b-logs" (OuterVolumeSpecName: "logs") pod "8a612579-d131-4dbd-85bc-ba455a26db3b" (UID: "8a612579-d131-4dbd-85bc-ba455a26db3b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.865913 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a612579-d131-4dbd-85bc-ba455a26db3b-kube-api-access-vcbk2" (OuterVolumeSpecName: "kube-api-access-vcbk2") pod "8a612579-d131-4dbd-85bc-ba455a26db3b" (UID: "8a612579-d131-4dbd-85bc-ba455a26db3b"). InnerVolumeSpecName "kube-api-access-vcbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.883507 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-scripts" (OuterVolumeSpecName: "scripts") pod "8a612579-d131-4dbd-85bc-ba455a26db3b" (UID: "8a612579-d131-4dbd-85bc-ba455a26db3b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.932896 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-nb\") pod \"7b961caf-9678-485f-be86-93f60808ac4d\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.933074 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4t6z6\" (UniqueName: \"kubernetes.io/projected/7b961caf-9678-485f-be86-93f60808ac4d-kube-api-access-4t6z6\") pod \"7b961caf-9678-485f-be86-93f60808ac4d\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.934377 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-sb\") pod \"7b961caf-9678-485f-be86-93f60808ac4d\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.934488 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-config\") pod \"7b961caf-9678-485f-be86-93f60808ac4d\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.934573 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-dns-svc\") pod \"7b961caf-9678-485f-be86-93f60808ac4d\" (UID: \"7b961caf-9678-485f-be86-93f60808ac4d\") " Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.935233 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.935251 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vcbk2\" (UniqueName: \"kubernetes.io/projected/8a612579-d131-4dbd-85bc-ba455a26db3b-kube-api-access-vcbk2\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.935260 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8a612579-d131-4dbd-85bc-ba455a26db3b-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.943257 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b961caf-9678-485f-be86-93f60808ac4d-kube-api-access-4t6z6" (OuterVolumeSpecName: "kube-api-access-4t6z6") pod "7b961caf-9678-485f-be86-93f60808ac4d" (UID: "7b961caf-9678-485f-be86-93f60808ac4d"). InnerVolumeSpecName "kube-api-access-4t6z6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.984991 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jmcnk" event={"ID":"8a612579-d131-4dbd-85bc-ba455a26db3b","Type":"ContainerDied","Data":"0f356b678d94a731123a4e141da237e9a76b3020cce9d2f08bacf4d2fd0d20a0"} Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.985035 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f356b678d94a731123a4e141da237e9a76b3020cce9d2f08bacf4d2fd0d20a0" Nov 29 04:31:06 crc kubenswrapper[4631]: I1129 04:31:06.985101 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-jmcnk" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.009508 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a612579-d131-4dbd-85bc-ba455a26db3b" (UID: "8a612579-d131-4dbd-85bc-ba455a26db3b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.021588 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-config-data" (OuterVolumeSpecName: "config-data") pod "8a612579-d131-4dbd-85bc-ba455a26db3b" (UID: "8a612579-d131-4dbd-85bc-ba455a26db3b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.040160 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.040510 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-cw9bv" event={"ID":"7b961caf-9678-485f-be86-93f60808ac4d","Type":"ContainerDied","Data":"1b270147ec17d6908ecce2d9c567772e4cb0c44b171ddd8ae2f0254c87fa7ba4"} Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.040542 4631 scope.go:117] "RemoveContainer" containerID="27f94d835a6d07e7dd01988c209a4b9280ebba417542ab3d939c3a735c8ebcb2" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.044283 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4t6z6\" (UniqueName: \"kubernetes.io/projected/7b961caf-9678-485f-be86-93f60808ac4d-kube-api-access-4t6z6\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.044677 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.044692 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a612579-d131-4dbd-85bc-ba455a26db3b-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.091301 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.129403 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-config" (OuterVolumeSpecName: "config") pod "7b961caf-9678-485f-be86-93f60808ac4d" (UID: "7b961caf-9678-485f-be86-93f60808ac4d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.129427 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6c8fbfb7d4-6m5ww"] Nov 29 04:31:07 crc kubenswrapper[4631]: E1129 04:31:07.129807 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="448a8237-ee30-4d73-86be-d12a309691f5" containerName="glance-httpd" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.129823 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="448a8237-ee30-4d73-86be-d12a309691f5" containerName="glance-httpd" Nov 29 04:31:07 crc kubenswrapper[4631]: E1129 04:31:07.129837 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b961caf-9678-485f-be86-93f60808ac4d" containerName="dnsmasq-dns" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.129845 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b961caf-9678-485f-be86-93f60808ac4d" containerName="dnsmasq-dns" Nov 29 04:31:07 crc kubenswrapper[4631]: E1129 04:31:07.129860 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="448a8237-ee30-4d73-86be-d12a309691f5" containerName="glance-log" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.129866 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="448a8237-ee30-4d73-86be-d12a309691f5" containerName="glance-log" Nov 29 04:31:07 crc kubenswrapper[4631]: E1129 04:31:07.129876 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b961caf-9678-485f-be86-93f60808ac4d" containerName="init" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.129881 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b961caf-9678-485f-be86-93f60808ac4d" containerName="init" Nov 29 04:31:07 crc kubenswrapper[4631]: E1129 04:31:07.129898 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a612579-d131-4dbd-85bc-ba455a26db3b" containerName="placement-db-sync" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.129905 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a612579-d131-4dbd-85bc-ba455a26db3b" containerName="placement-db-sync" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.130054 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="448a8237-ee30-4d73-86be-d12a309691f5" containerName="glance-log" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.130071 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a612579-d131-4dbd-85bc-ba455a26db3b" containerName="placement-db-sync" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.130082 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="448a8237-ee30-4d73-86be-d12a309691f5" containerName="glance-httpd" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.130094 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b961caf-9678-485f-be86-93f60808ac4d" containerName="dnsmasq-dns" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.130956 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.136951 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.137719 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.145694 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6c8fbfb7d4-6m5ww"] Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.146794 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.160722 4631 scope.go:117] "RemoveContainer" containerID="23e12f2f4f3b6506fe8fbad410a913f052575bba2d2e514f1581daec25561142" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.234987 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7b961caf-9678-485f-be86-93f60808ac4d" (UID: "7b961caf-9678-485f-be86-93f60808ac4d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.247544 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-scripts\") pod \"448a8237-ee30-4d73-86be-d12a309691f5\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.247671 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-config-data\") pod \"448a8237-ee30-4d73-86be-d12a309691f5\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.247761 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-httpd-run\") pod \"448a8237-ee30-4d73-86be-d12a309691f5\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.247834 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t56jh\" (UniqueName: \"kubernetes.io/projected/448a8237-ee30-4d73-86be-d12a309691f5-kube-api-access-t56jh\") pod \"448a8237-ee30-4d73-86be-d12a309691f5\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.247881 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-logs\") pod \"448a8237-ee30-4d73-86be-d12a309691f5\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.247937 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"448a8237-ee30-4d73-86be-d12a309691f5\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.247957 4631 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-combined-ca-bundle\") pod \"448a8237-ee30-4d73-86be-d12a309691f5\" (UID: \"448a8237-ee30-4d73-86be-d12a309691f5\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.248264 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v67n5\" (UniqueName: \"kubernetes.io/projected/1d801d33-e580-4849-ab8b-6f2a21118b1f-kube-api-access-v67n5\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.248294 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-config-data\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.248311 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-scripts\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.248353 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-internal-tls-certs\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.248381 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-combined-ca-bundle\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.248447 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d801d33-e580-4849-ab8b-6f2a21118b1f-logs\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.248639 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-public-tls-certs\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.248696 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.252751 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "448a8237-ee30-4d73-86be-d12a309691f5" (UID: "448a8237-ee30-4d73-86be-d12a309691f5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.255401 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-logs" (OuterVolumeSpecName: "logs") pod "448a8237-ee30-4d73-86be-d12a309691f5" (UID: "448a8237-ee30-4d73-86be-d12a309691f5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.260056 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7b961caf-9678-485f-be86-93f60808ac4d" (UID: "7b961caf-9678-485f-be86-93f60808ac4d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.286073 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/448a8237-ee30-4d73-86be-d12a309691f5-kube-api-access-t56jh" (OuterVolumeSpecName: "kube-api-access-t56jh") pod "448a8237-ee30-4d73-86be-d12a309691f5" (UID: "448a8237-ee30-4d73-86be-d12a309691f5"). InnerVolumeSpecName "kube-api-access-t56jh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.286117 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-scripts" (OuterVolumeSpecName: "scripts") pod "448a8237-ee30-4d73-86be-d12a309691f5" (UID: "448a8237-ee30-4d73-86be-d12a309691f5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.286814 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7b961caf-9678-485f-be86-93f60808ac4d" (UID: "7b961caf-9678-485f-be86-93f60808ac4d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.300193 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "448a8237-ee30-4d73-86be-d12a309691f5" (UID: "448a8237-ee30-4d73-86be-d12a309691f5"). InnerVolumeSpecName "local-storage02-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350297 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d801d33-e580-4849-ab8b-6f2a21118b1f-logs\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350644 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-public-tls-certs\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350701 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v67n5\" (UniqueName: \"kubernetes.io/projected/1d801d33-e580-4849-ab8b-6f2a21118b1f-kube-api-access-v67n5\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350725 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-config-data\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350745 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-scripts\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350763 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-internal-tls-certs\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350786 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-combined-ca-bundle\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350903 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350928 4631 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350940 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b961caf-9678-485f-be86-93f60808ac4d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc 
kubenswrapper[4631]: I1129 04:31:07.350949 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350958 4631 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350966 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t56jh\" (UniqueName: \"kubernetes.io/projected/448a8237-ee30-4d73-86be-d12a309691f5-kube-api-access-t56jh\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.350976 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/448a8237-ee30-4d73-86be-d12a309691f5-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.358852 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d801d33-e580-4849-ab8b-6f2a21118b1f-logs\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.390716 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-scripts\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.395308 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-config-data\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.399946 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-combined-ca-bundle\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.418350 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-public-tls-certs\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.440425 4631 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.441019 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d801d33-e580-4849-ab8b-6f2a21118b1f-internal-tls-certs\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.448991 4631 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v67n5\" (UniqueName: \"kubernetes.io/projected/1d801d33-e580-4849-ab8b-6f2a21118b1f-kube-api-access-v67n5\") pod \"placement-6c8fbfb7d4-6m5ww\" (UID: \"1d801d33-e580-4849-ab8b-6f2a21118b1f\") " pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.456564 4631 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.504411 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.523588 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "448a8237-ee30-4d73-86be-d12a309691f5" (UID: "448a8237-ee30-4d73-86be-d12a309691f5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.523711 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-config-data" (OuterVolumeSpecName: "config-data") pod "448a8237-ee30-4d73-86be-d12a309691f5" (UID: "448a8237-ee30-4d73-86be-d12a309691f5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.532480 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.562571 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.562598 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/448a8237-ee30-4d73-86be-d12a309691f5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.615786 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.651016 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-cw9bv"] Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.664110 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-config-data\") pod \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.664248 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-httpd-run\") pod \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.664271 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-combined-ca-bundle\") pod \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.664302 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dh497\" (UniqueName: \"kubernetes.io/projected/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-kube-api-access-dh497\") pod \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.664353 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-scripts\") pod \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.664394 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.664463 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-logs\") pod \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\" (UID: \"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.665592 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-logs" (OuterVolumeSpecName: "logs") pod "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" (UID: "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.666105 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" (UID: "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.682387 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" (UID: "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.682873 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-kube-api-access-dh497" (OuterVolumeSpecName: "kube-api-access-dh497") pod "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" (UID: "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e"). InnerVolumeSpecName "kube-api-access-dh497". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.683305 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-scripts" (OuterVolumeSpecName: "scripts") pod "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" (UID: "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.689394 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-cw9bv"] Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.734614 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" (UID: "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.766653 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-credential-keys\") pod \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.766737 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-combined-ca-bundle\") pod \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.766769 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whtqq\" (UniqueName: \"kubernetes.io/projected/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-kube-api-access-whtqq\") pod \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.766811 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-scripts\") pod \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.766880 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-config-data\") pod \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.766919 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-fernet-keys\") pod \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\" (UID: \"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed\") " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.767251 4631 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.767265 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.767277 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dh497\" (UniqueName: \"kubernetes.io/projected/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-kube-api-access-dh497\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.767284 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.767300 4631 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.767309 4631 
reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.770749 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-scripts" (OuterVolumeSpecName: "scripts") pod "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" (UID: "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.773067 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-kube-api-access-whtqq" (OuterVolumeSpecName: "kube-api-access-whtqq") pod "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" (UID: "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed"). InnerVolumeSpecName "kube-api-access-whtqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.775669 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" (UID: "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.777005 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" (UID: "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.817079 4631 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.819614 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-config-data" (OuterVolumeSpecName: "config-data") pod "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" (UID: "b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.820564 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-config-data" (OuterVolumeSpecName: "config-data") pod "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" (UID: "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.824561 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" (UID: "4d744ff5-22a8-445d-a1f9-a7fd1030d5ed"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.871284 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whtqq\" (UniqueName: \"kubernetes.io/projected/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-kube-api-access-whtqq\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.871312 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.871323 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.871343 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.871351 4631 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.871359 4631 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.871367 4631 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:07 crc kubenswrapper[4631]: I1129 04:31:07.871374 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.048765 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d7446b849-nsq65" event={"ID":"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9","Type":"ContainerStarted","Data":"9e17c0f680de4957afc1c2463e28ff13de965f8f3ae99ff5cffb34dda193a158"} Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.048813 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7d7446b849-nsq65" event={"ID":"7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9","Type":"ContainerStarted","Data":"089effaa69fabd5a84a624f6cc4b3b0e017e346362ef06d2948566ffb4921e8f"} Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.050316 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6bzb6" event={"ID":"4d744ff5-22a8-445d-a1f9-a7fd1030d5ed","Type":"ContainerDied","Data":"c0a8467046c1c09cb667c26e37e33c49934cfbe4e16af9a5c220931462c99b5d"} Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.050355 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0a8467046c1c09cb667c26e37e33c49934cfbe4e16af9a5c220931462c99b5d" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.050408 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-6bzb6" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.053149 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e","Type":"ContainerDied","Data":"b2905cfbb27132258fc33017455de1e7e4a603b93de37574fd5f2404b7bbd25a"} Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.053198 4631 scope.go:117] "RemoveContainer" containerID="b500372af6768b07352e19c5b10fe93ded6ebd6317710f179e8461060ac68bb2" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.053315 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.078465 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"448a8237-ee30-4d73-86be-d12a309691f5","Type":"ContainerDied","Data":"4e50ed2388f9b1fa2a40d1d288783f52e2c92bead667157277d771822601f304"} Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.079016 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.080387 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" event={"ID":"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6","Type":"ContainerStarted","Data":"ff8cc64bd9d22607dfc288c70f096ef509d908e83b2947abfe6601744ea0224f"} Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.080890 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.087828 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"782a0b9b-d16f-495e-a648-e8a03af1e2d2","Type":"ContainerStarted","Data":"6aafa51458bdfd083742597502d5cfbfc83b12e11f970844c626abb13f0be4f7"} Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.102906 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.118465 4631 scope.go:117] "RemoveContainer" containerID="74aceef224d1bc0faf362ff7a3cddf05a5ffbca937eb9e41c36c17fff2fe2f28" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.140250 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6ff47d4689-gnj7t"] Nov 29 04:31:08 crc kubenswrapper[4631]: E1129 04:31:08.140812 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerName="glance-httpd" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.140833 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerName="glance-httpd" Nov 29 04:31:08 crc kubenswrapper[4631]: E1129 04:31:08.140864 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" containerName="keystone-bootstrap" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.140872 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" containerName="keystone-bootstrap" Nov 29 04:31:08 crc kubenswrapper[4631]: E1129 04:31:08.140886 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerName="glance-log" Nov 29 
04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.140903 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerName="glance-log" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.141088 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerName="glance-httpd" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.141114 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" containerName="keystone-bootstrap" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.141131 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" containerName="glance-log" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.141889 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.149486 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.149761 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-zjswg" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.149880 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.149938 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.149976 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.150177 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.161363 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.179668 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6c8fbfb7d4-6m5ww"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.187187 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6ff47d4689-gnj7t"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.198372 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.199903 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.203700 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" podStartSLOduration=9.203684528 podStartE2EDuration="9.203684528s" podCreationTimestamp="2025-11-29 04:30:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:08.151791273 +0000 UTC m=+1195.216294787" watchObservedRunningTime="2025-11-29 04:31:08.203684528 +0000 UTC m=+1195.268188042" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.204731 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.204938 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.205048 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qmjvc" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.205153 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.214250 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.251647 4631 scope.go:117] "RemoveContainer" containerID="49725203c0021296d10c62ed7982323191106e31499fd25f3b65b6b57390de26" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.255612 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.272975 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282744 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282789 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-fernet-keys\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282824 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-config-data\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282838 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-combined-ca-bundle\") pod \"keystone-6ff47d4689-gnj7t\" (UID: 
\"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282853 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-internal-tls-certs\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282896 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-scripts\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282913 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-public-tls-certs\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282927 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-credential-keys\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282947 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282965 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.282986 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.283030 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-config-data\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.283057 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-logs\") pod 
\"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.283073 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjnh5\" (UniqueName: \"kubernetes.io/projected/7771bd0b-e533-499d-9b8a-9071eb930e26-kube-api-access-tjnh5\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.283088 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbddt\" (UniqueName: \"kubernetes.io/projected/caf43f43-1632-4a05-902b-6c25b8dadf71-kube-api-access-dbddt\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.283103 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-scripts\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.321845 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.323674 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.341963 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.342945 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.374664 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.379139 4631 scope.go:117] "RemoveContainer" containerID="1f0e7c5628c5abeb1c044c7a3298116d52aa1c9695542db074d5dfe620ed98cc" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384410 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-config-data\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384439 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-internal-tls-certs\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384461 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-combined-ca-bundle\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " 
pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384491 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-config-data\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384523 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctsth\" (UniqueName: \"kubernetes.io/projected/c3760278-593c-4aa1-9ab5-db3403795f2c-kube-api-access-ctsth\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384544 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-scripts\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384563 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-public-tls-certs\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384592 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-credential-keys\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384610 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-logs\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384630 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384652 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384674 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc 
kubenswrapper[4631]: I1129 04:31:08.384690 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384707 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384738 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-config-data\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384765 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-logs\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384782 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjnh5\" (UniqueName: \"kubernetes.io/projected/7771bd0b-e533-499d-9b8a-9071eb930e26-kube-api-access-tjnh5\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384797 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384813 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbddt\" (UniqueName: \"kubernetes.io/projected/caf43f43-1632-4a05-902b-6c25b8dadf71-kube-api-access-dbddt\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384832 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-scripts\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384853 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc 
kubenswrapper[4631]: I1129 04:31:08.384873 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384894 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-fernet-keys\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.384912 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-scripts\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.391903 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.398167 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-logs\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.403875 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-scripts\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.410220 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.413945 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-scripts\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.416970 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-credential-keys\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.419018 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.419760 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbddt\" (UniqueName: \"kubernetes.io/projected/caf43f43-1632-4a05-902b-6c25b8dadf71-kube-api-access-dbddt\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.420514 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-config-data\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.420778 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-fernet-keys\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.422192 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.424231 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-config-data\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.424592 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-internal-tls-certs\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.424912 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-public-tls-certs\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.431499 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7771bd0b-e533-499d-9b8a-9071eb930e26-combined-ca-bundle\") pod \"keystone-6ff47d4689-gnj7t\" (UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.441843 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjnh5\" (UniqueName: \"kubernetes.io/projected/7771bd0b-e533-499d-9b8a-9071eb930e26-kube-api-access-tjnh5\") pod \"keystone-6ff47d4689-gnj7t\" 
(UID: \"7771bd0b-e533-499d-9b8a-9071eb930e26\") " pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.473752 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.488492 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.488724 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctsth\" (UniqueName: \"kubernetes.io/projected/c3760278-593c-4aa1-9ab5-db3403795f2c-kube-api-access-ctsth\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.488773 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-logs\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.488820 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.488835 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.490092 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-logs\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.491520 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.491602 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.491645 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-scripts\") pod 
\"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.491713 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-config-data\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.492823 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.492881 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.498819 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.507107 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.507452 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-config-data\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.508447 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-scripts\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.511390 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctsth\" (UniqueName: \"kubernetes.io/projected/c3760278-593c-4aa1-9ab5-db3403795f2c-kube-api-access-ctsth\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.559596 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " 
pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.662161 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.671250 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:31:08 crc kubenswrapper[4631]: I1129 04:31:08.979355 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6ff47d4689-gnj7t"] Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.152932 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6ff47d4689-gnj7t" event={"ID":"7771bd0b-e533-499d-9b8a-9071eb930e26","Type":"ContainerStarted","Data":"4a99a78d971847c65692853ac083861e012c1b2c4d5de8ebd302d5452f12c7d3"} Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.212504 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6c8fbfb7d4-6m5ww" event={"ID":"1d801d33-e580-4849-ab8b-6f2a21118b1f","Type":"ContainerStarted","Data":"bb700441de0d9af3caebea9dbcf0fd0ce8501d1db68ba4d8dabe538de5d91e5b"} Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.212757 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6c8fbfb7d4-6m5ww" event={"ID":"1d801d33-e580-4849-ab8b-6f2a21118b1f","Type":"ContainerStarted","Data":"6e1238318ac2ba4ddd8761c791cd6b613d2f19a2238db6bc09da82222e64e91b"} Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.213403 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.251265 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7d7446b849-nsq65" podStartSLOduration=9.251248521 podStartE2EDuration="9.251248521s" podCreationTimestamp="2025-11-29 04:31:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:09.240924529 +0000 UTC m=+1196.305428043" watchObservedRunningTime="2025-11-29 04:31:09.251248521 +0000 UTC m=+1196.315752035" Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.272898 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="448a8237-ee30-4d73-86be-d12a309691f5" path="/var/lib/kubelet/pods/448a8237-ee30-4d73-86be-d12a309691f5/volumes" Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.273751 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b961caf-9678-485f-be86-93f60808ac4d" path="/var/lib/kubelet/pods/7b961caf-9678-485f-be86-93f60808ac4d/volumes" Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.275458 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e" path="/var/lib/kubelet/pods/b2b2ff8d-8a85-49b8-a63f-df9ffb52c27e/volumes" Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.297693 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:31:09 crc kubenswrapper[4631]: I1129 04:31:09.476348 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:31:09 crc kubenswrapper[4631]: W1129 04:31:09.511986 4631 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcaf43f43_1632_4a05_902b_6c25b8dadf71.slice/crio-611dc4efbf211a3fffdf73bec7870136b9485656cd01d0e5ac3e754c2f797ab7 WatchSource:0}: Error finding container 611dc4efbf211a3fffdf73bec7870136b9485656cd01d0e5ac3e754c2f797ab7: Status 404 returned error can't find the container with id 611dc4efbf211a3fffdf73bec7870136b9485656cd01d0e5ac3e754c2f797ab7 Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.593133 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6ff47d4689-gnj7t" event={"ID":"7771bd0b-e533-499d-9b8a-9071eb930e26","Type":"ContainerStarted","Data":"dadfbf84c03959919f4fb9af62eb5c6882e925fa00e17e7c84419a24d431e221"} Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.593561 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.595229 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"caf43f43-1632-4a05-902b-6c25b8dadf71","Type":"ContainerStarted","Data":"611dc4efbf211a3fffdf73bec7870136b9485656cd01d0e5ac3e754c2f797ab7"} Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.597278 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6c8fbfb7d4-6m5ww" event={"ID":"1d801d33-e580-4849-ab8b-6f2a21118b1f","Type":"ContainerStarted","Data":"983db78fe0ea09e79ed370698f813e244e650913550df2faee8a77dd4af5a9a3"} Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.597622 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.601303 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c3760278-593c-4aa1-9ab5-db3403795f2c","Type":"ContainerStarted","Data":"81d177e0dfcdca12fa3773440103f534cd897eefecc759ad032d3937b9e852b9"} Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.603433 4631 generic.go:334] "Generic (PLEG): container finished" podID="77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" containerID="bc62497bf619cacb0fdf8dcd7974505a74ab66a52fdbe5b93e3b60a31820e5e5" exitCode=0 Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.604196 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-4zg2x" event={"ID":"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d","Type":"ContainerDied","Data":"bc62497bf619cacb0fdf8dcd7974505a74ab66a52fdbe5b93e3b60a31820e5e5"} Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.615680 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6c8fbfb7d4-6m5ww" podStartSLOduration=3.6156610970000003 podStartE2EDuration="3.615661097s" podCreationTimestamp="2025-11-29 04:31:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:10.614128069 +0000 UTC m=+1197.678631583" watchObservedRunningTime="2025-11-29 04:31:10.615661097 +0000 UTC m=+1197.680164611" Nov 29 04:31:10 crc kubenswrapper[4631]: I1129 04:31:10.659130 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6ff47d4689-gnj7t" podStartSLOduration=2.659109986 podStartE2EDuration="2.659109986s" podCreationTimestamp="2025-11-29 04:31:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 
UTC" observedRunningTime="2025-11-29 04:31:10.641956398 +0000 UTC m=+1197.706459912" watchObservedRunningTime="2025-11-29 04:31:10.659109986 +0000 UTC m=+1197.723613500" Nov 29 04:31:11 crc kubenswrapper[4631]: I1129 04:31:11.616638 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c3760278-593c-4aa1-9ab5-db3403795f2c","Type":"ContainerStarted","Data":"c071365af6db32296f28d6d0e12d7003fcb31fa1330ad7c3d1e9c2898fd530f5"} Nov 29 04:31:11 crc kubenswrapper[4631]: I1129 04:31:11.618445 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"caf43f43-1632-4a05-902b-6c25b8dadf71","Type":"ContainerStarted","Data":"9888df03259e546ad91a21768846ce5b505694ccd1d719cc5de7d804d697b58f"} Nov 29 04:31:11 crc kubenswrapper[4631]: I1129 04:31:11.618650 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:11 crc kubenswrapper[4631]: I1129 04:31:11.976545 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.119391 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-db-sync-config-data\") pod \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.119776 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2h4b\" (UniqueName: \"kubernetes.io/projected/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-kube-api-access-r2h4b\") pod \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.119899 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-combined-ca-bundle\") pod \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\" (UID: \"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d\") " Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.125803 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" (UID: "77e723d0-49bf-4008-bbce-7c1fe2ad3a5d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.128156 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-kube-api-access-r2h4b" (OuterVolumeSpecName: "kube-api-access-r2h4b") pod "77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" (UID: "77e723d0-49bf-4008-bbce-7c1fe2ad3a5d"). InnerVolumeSpecName "kube-api-access-r2h4b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.147640 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" (UID: "77e723d0-49bf-4008-bbce-7c1fe2ad3a5d"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.222062 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2h4b\" (UniqueName: \"kubernetes.io/projected/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-kube-api-access-r2h4b\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.222489 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.222508 4631 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.626982 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-4zg2x" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.631752 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-4zg2x" event={"ID":"77e723d0-49bf-4008-bbce-7c1fe2ad3a5d","Type":"ContainerDied","Data":"94a26d1ac58b90921c5aa447e84d3bf1c1a3c7851eae14f2b58a0fd49c3c0a74"} Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.632226 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94a26d1ac58b90921c5aa447e84d3bf1c1a3c7851eae14f2b58a0fd49c3c0a74" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.903018 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-777b8bd98c-blh5p"] Nov 29 04:31:12 crc kubenswrapper[4631]: E1129 04:31:12.903346 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" containerName="barbican-db-sync" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.903357 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" containerName="barbican-db-sync" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.903537 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" containerName="barbican-db-sync" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.904374 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.910161 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-k9j2z" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.913506 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.913693 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.939159 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-777b8bd98c-blh5p"] Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.955929 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5555f74b94-58bwl"] Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.957303 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.966297 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 29 04:31:12 crc kubenswrapper[4631]: I1129 04:31:12.996889 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5555f74b94-58bwl"] Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.040525 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5880470-8751-4613-82c7-33efabd35a6e-combined-ca-bundle\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.040806 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e5880470-8751-4613-82c7-33efabd35a6e-config-data-custom\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.040854 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5880470-8751-4613-82c7-33efabd35a6e-config-data\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.040914 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e5880470-8751-4613-82c7-33efabd35a6e-logs\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.040931 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j55kk\" (UniqueName: \"kubernetes.io/projected/e5880470-8751-4613-82c7-33efabd35a6e-kube-api-access-j55kk\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " 
pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.096708 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-65dc8fcc6b-fns29"] Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.097999 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.102075 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.133481 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-65dc8fcc6b-fns29"] Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143027 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5880470-8751-4613-82c7-33efabd35a6e-combined-ca-bundle\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143100 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-config-data\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143190 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-config-data-custom\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143279 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e5880470-8751-4613-82c7-33efabd35a6e-config-data-custom\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143370 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrlwb\" (UniqueName: \"kubernetes.io/projected/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-kube-api-access-lrlwb\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143398 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5880470-8751-4613-82c7-33efabd35a6e-config-data\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143416 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-logs\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" 
(UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143432 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-combined-ca-bundle\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143467 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e5880470-8751-4613-82c7-33efabd35a6e-logs\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.143483 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j55kk\" (UniqueName: \"kubernetes.io/projected/e5880470-8751-4613-82c7-33efabd35a6e-kube-api-access-j55kk\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.144902 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e5880470-8751-4613-82c7-33efabd35a6e-logs\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.149901 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5880470-8751-4613-82c7-33efabd35a6e-combined-ca-bundle\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.150850 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.150958 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.162286 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5880470-8751-4613-82c7-33efabd35a6e-config-data\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.166592 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e5880470-8751-4613-82c7-33efabd35a6e-config-data-custom\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.171580 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial 
tcp 10.217.0.143:8443: connect: connection refused" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.197610 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j55kk\" (UniqueName: \"kubernetes.io/projected/e5880470-8751-4613-82c7-33efabd35a6e-kube-api-access-j55kk\") pod \"barbican-worker-777b8bd98c-blh5p\" (UID: \"e5880470-8751-4613-82c7-33efabd35a6e\") " pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245606 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-config-data\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245654 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/125ef941-9d17-4b6e-b395-1deac81e80d8-logs\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245672 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245717 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-config-data-custom\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245768 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrlwb\" (UniqueName: \"kubernetes.io/projected/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-kube-api-access-lrlwb\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245790 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq77x\" (UniqueName: \"kubernetes.io/projected/125ef941-9d17-4b6e-b395-1deac81e80d8-kube-api-access-wq77x\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245809 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-logs\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245826 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-combined-ca-bundle\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245866 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data-custom\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.245885 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-combined-ca-bundle\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.248911 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-logs\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.257295 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-k9j2z" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.258142 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.260019 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-777b8bd98c-blh5p" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.264429 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-config-data\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.266035 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-combined-ca-bundle\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.268054 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrlwb\" (UniqueName: \"kubernetes.io/projected/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-kube-api-access-lrlwb\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.272257 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5964d597b6-rfcr2" podUID="e2a6410f-6c69-4b87-a247-b285aef98b71" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.277033 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f3ff1d2b-0fc8-49dc-a02f-948b81d54988-config-data-custom\") pod \"barbican-keystone-listener-5555f74b94-58bwl\" (UID: \"f3ff1d2b-0fc8-49dc-a02f-948b81d54988\") " pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.329260 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.351685 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq77x\" (UniqueName: \"kubernetes.io/projected/125ef941-9d17-4b6e-b395-1deac81e80d8-kube-api-access-wq77x\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.351831 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data-custom\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.351861 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-combined-ca-bundle\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.351990 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/125ef941-9d17-4b6e-b395-1deac81e80d8-logs\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.352016 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.354978 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.606799 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xrtmb"] Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.607023 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" podUID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerName="dnsmasq-dns" containerID="cri-o://ff8cc64bd9d22607dfc288c70f096ef509d908e83b2947abfe6601744ea0224f" gracePeriod=10 Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.611266 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.633722 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/125ef941-9d17-4b6e-b395-1deac81e80d8-logs\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.641471 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-klnj5"] Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.642848 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.652983 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data-custom\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.653793 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq77x\" (UniqueName: \"kubernetes.io/projected/125ef941-9d17-4b6e-b395-1deac81e80d8-kube-api-access-wq77x\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.658992 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-combined-ca-bundle\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.667367 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data\") pod \"barbican-api-65dc8fcc6b-fns29\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.676546 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-klnj5"] Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.727301 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.771252 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-config\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.771297 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.771420 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.771446 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-svc\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.771499 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rr4w\" (UniqueName: \"kubernetes.io/projected/1a245b27-d3b7-44b8-8680-d7381bf83f08-kube-api-access-6rr4w\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.771515 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.873525 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.874112 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-svc\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.874878 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rr4w\" (UniqueName: \"kubernetes.io/projected/1a245b27-d3b7-44b8-8680-d7381bf83f08-kube-api-access-6rr4w\") pod 
\"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.874915 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.875180 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-config\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.875229 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.875761 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.876113 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.876262 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-svc\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.876849 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.877019 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-config\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.916918 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rr4w\" (UniqueName: \"kubernetes.io/projected/1a245b27-d3b7-44b8-8680-d7381bf83f08-kube-api-access-6rr4w\") pod \"dnsmasq-dns-85ff748b95-klnj5\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " 
pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.969037 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-777b8bd98c-blh5p"] Nov 29 04:31:13 crc kubenswrapper[4631]: I1129 04:31:13.986796 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:14 crc kubenswrapper[4631]: W1129 04:31:13.998308 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5880470_8751_4613_82c7_33efabd35a6e.slice/crio-f66e599b89cbb844d15077182bec11f99dd63b0b9ae1dbcefca54333c5132ac3 WatchSource:0}: Error finding container f66e599b89cbb844d15077182bec11f99dd63b0b9ae1dbcefca54333c5132ac3: Status 404 returned error can't find the container with id f66e599b89cbb844d15077182bec11f99dd63b0b9ae1dbcefca54333c5132ac3 Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.100732 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5555f74b94-58bwl"] Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.349848 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-65dc8fcc6b-fns29"] Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.637507 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-klnj5"] Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.657422 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c3760278-593c-4aa1-9ab5-db3403795f2c","Type":"ContainerStarted","Data":"935b56bfa811c44bf3fe96f6a495cb964d38fa9d8e37d00b97df9921d4e176df"} Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.673613 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" event={"ID":"f3ff1d2b-0fc8-49dc-a02f-948b81d54988","Type":"ContainerStarted","Data":"6d96abda9b417c3740d6580f0588d232d76aa68341383664c78165cde4232769"} Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.675304 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-777b8bd98c-blh5p" event={"ID":"e5880470-8751-4613-82c7-33efabd35a6e","Type":"ContainerStarted","Data":"f66e599b89cbb844d15077182bec11f99dd63b0b9ae1dbcefca54333c5132ac3"} Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.688043 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"caf43f43-1632-4a05-902b-6c25b8dadf71","Type":"ContainerStarted","Data":"6df2a7ecf04d95c354794cb2f4e8b4f282fd1d005dbe3597d2e228968aeb5401"} Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.694932 4631 generic.go:334] "Generic (PLEG): container finished" podID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerID="ff8cc64bd9d22607dfc288c70f096ef509d908e83b2947abfe6601744ea0224f" exitCode=0 Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.694990 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" event={"ID":"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6","Type":"ContainerDied","Data":"ff8cc64bd9d22607dfc288c70f096ef509d908e83b2947abfe6601744ea0224f"} Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.704972 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.7049575279999996 podStartE2EDuration="6.704957528s" 
podCreationTimestamp="2025-11-29 04:31:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:14.700028258 +0000 UTC m=+1201.764531772" watchObservedRunningTime="2025-11-29 04:31:14.704957528 +0000 UTC m=+1201.769461042" Nov 29 04:31:14 crc kubenswrapper[4631]: I1129 04:31:14.723273 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.723259144 podStartE2EDuration="6.723259144s" podCreationTimestamp="2025-11-29 04:31:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:14.721165843 +0000 UTC m=+1201.785669357" watchObservedRunningTime="2025-11-29 04:31:14.723259144 +0000 UTC m=+1201.787762658" Nov 29 04:31:15 crc kubenswrapper[4631]: I1129 04:31:15.895532 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-fb967fbcd-pqplm"] Nov 29 04:31:15 crc kubenswrapper[4631]: I1129 04:31:15.897017 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:15 crc kubenswrapper[4631]: I1129 04:31:15.898731 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 29 04:31:15 crc kubenswrapper[4631]: I1129 04:31:15.898943 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 29 04:31:15 crc kubenswrapper[4631]: I1129 04:31:15.914437 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-fb967fbcd-pqplm"] Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.027638 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-logs\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.027742 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-config-data-custom\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.027775 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx85v\" (UniqueName: \"kubernetes.io/projected/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-kube-api-access-zx85v\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.027793 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-internal-tls-certs\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.027815 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-config-data\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.027931 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-combined-ca-bundle\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.028037 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-public-tls-certs\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.129823 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-logs\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.130136 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-config-data-custom\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.130169 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx85v\" (UniqueName: \"kubernetes.io/projected/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-kube-api-access-zx85v\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.130186 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-internal-tls-certs\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.130206 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-config-data\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.130224 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-combined-ca-bundle\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.130258 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-logs\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.130341 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-public-tls-certs\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.141465 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-config-data-custom\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.143614 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-internal-tls-certs\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.147355 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-config-data\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.148034 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-combined-ca-bundle\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.155752 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-public-tls-certs\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.174064 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zx85v\" (UniqueName: \"kubernetes.io/projected/f7aced22-8f95-4c19-b6c6-f56a84ae29e0-kube-api-access-zx85v\") pod \"barbican-api-fb967fbcd-pqplm\" (UID: \"f7aced22-8f95-4c19-b6c6-f56a84ae29e0\") " pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:16 crc kubenswrapper[4631]: I1129 04:31:16.218400 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.663057 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.663351 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.672175 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.672223 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.706979 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.712441 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.714315 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.729776 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.729939 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.730152 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:18 crc kubenswrapper[4631]: I1129 04:31:18.748672 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 29 04:31:19 crc kubenswrapper[4631]: I1129 04:31:19.674013 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" podUID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.151:5353: i/o timeout" Nov 29 04:31:19 crc kubenswrapper[4631]: I1129 04:31:19.740670 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 29 04:31:19 crc kubenswrapper[4631]: I1129 04:31:19.885180 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:31:19 crc kubenswrapper[4631]: I1129 04:31:19.993178 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtjpp\" (UniqueName: \"kubernetes.io/projected/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-kube-api-access-dtjpp\") pod \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " Nov 29 04:31:19 crc kubenswrapper[4631]: I1129 04:31:19.993221 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-nb\") pod \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " Nov 29 04:31:19 crc kubenswrapper[4631]: I1129 04:31:19.993296 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-svc\") pod \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " Nov 29 04:31:19 crc kubenswrapper[4631]: I1129 04:31:19.993379 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-sb\") pod \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " Nov 29 04:31:19 crc kubenswrapper[4631]: I1129 04:31:19.993420 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-config\") pod \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " Nov 29 04:31:19 crc kubenswrapper[4631]: I1129 04:31:19.993527 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-swift-storage-0\") pod \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\" (UID: \"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6\") " Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.011003 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-kube-api-access-dtjpp" (OuterVolumeSpecName: "kube-api-access-dtjpp") pod "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" (UID: "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6"). InnerVolumeSpecName "kube-api-access-dtjpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.054259 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" (UID: "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.071632 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" (UID: "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.091788 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" (UID: "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.095168 4631 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.095311 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtjpp\" (UniqueName: \"kubernetes.io/projected/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-kube-api-access-dtjpp\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.095405 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.095482 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.148065 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-config" (OuterVolumeSpecName: "config") pod "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" (UID: "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.149413 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" (UID: "5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.198089 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.198125 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.716412 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.716469 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.770228 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.770309 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" event={"ID":"5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6","Type":"ContainerDied","Data":"6394df9db4bed55b5f061381658da7a90b69536767f0aa1f428696e92e5853e2"} Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.770377 4631 scope.go:117] "RemoveContainer" containerID="ff8cc64bd9d22607dfc288c70f096ef509d908e83b2947abfe6601744ea0224f" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.770531 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.770543 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.770730 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.813394 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xrtmb"] Nov 29 04:31:20 crc kubenswrapper[4631]: I1129 04:31:20.823150 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xrtmb"] Nov 29 04:31:21 crc kubenswrapper[4631]: I1129 04:31:21.227843 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" path="/var/lib/kubelet/pods/5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6/volumes" Nov 29 04:31:21 crc kubenswrapper[4631]: W1129 04:31:21.613610 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod125ef941_9d17_4b6e_b395_1deac81e80d8.slice/crio-e3750ba542f5cabff1be0c70c2420433449f799a13c7b28203b6387b9d238c64 WatchSource:0}: Error finding container e3750ba542f5cabff1be0c70c2420433449f799a13c7b28203b6387b9d238c64: Status 404 returned error can't find the container with id e3750ba542f5cabff1be0c70c2420433449f799a13c7b28203b6387b9d238c64 Nov 29 04:31:21 crc kubenswrapper[4631]: I1129 04:31:21.784141 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65dc8fcc6b-fns29" event={"ID":"125ef941-9d17-4b6e-b395-1deac81e80d8","Type":"ContainerStarted","Data":"e3750ba542f5cabff1be0c70c2420433449f799a13c7b28203b6387b9d238c64"} Nov 29 04:31:21 crc kubenswrapper[4631]: I1129 04:31:21.785146 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:31:21 crc kubenswrapper[4631]: I1129 04:31:21.785162 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:31:21 crc kubenswrapper[4631]: I1129 04:31:21.785911 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" event={"ID":"1a245b27-d3b7-44b8-8680-d7381bf83f08","Type":"ContainerStarted","Data":"50824be3ab908e9d608a8a6ab27155a5b1741167da849169fc08887dfea9a12d"} Nov 29 04:31:22 crc kubenswrapper[4631]: I1129 04:31:22.100894 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:22 crc kubenswrapper[4631]: I1129 04:31:22.100968 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:31:22 crc kubenswrapper[4631]: I1129 04:31:22.111001 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 29 04:31:22 crc kubenswrapper[4631]: I1129 04:31:22.121188 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 29 04:31:22 crc kubenswrapper[4631]: I1129 04:31:22.296724 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 29 04:31:22 crc kubenswrapper[4631]: E1129 04:31:22.995443 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/ubi9/httpd-24:latest" Nov 29 
04:31:22 crc kubenswrapper[4631]: E1129 04:31:22.995807 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:proxy-httpd,Image:registry.redhat.io/ubi9/httpd-24:latest,Command:[/usr/sbin/httpd],Args:[-DFOREGROUND],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:proxy-httpd,HostPort:0,ContainerPort:3000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf/httpd.conf,SubPath:httpd.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf.d/ssl.conf,SubPath:ssl.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run-httpd,ReadOnly:false,MountPath:/run/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log-httpd,ReadOnly:false,MountPath:/var/log/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9nw56,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(782a0b9b-d16f-495e-a648-e8a03af1e2d2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 29 04:31:23 crc kubenswrapper[4631]: E1129 04:31:23.000743 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"proxy-httpd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" Nov 29 04:31:23 crc kubenswrapper[4631]: I1129 04:31:23.143035 4631 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Nov 29 04:31:23 crc kubenswrapper[4631]: I1129 04:31:23.270282 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5964d597b6-rfcr2" podUID="e2a6410f-6c69-4b87-a247-b285aef98b71" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Nov 29 04:31:23 crc kubenswrapper[4631]: I1129 04:31:23.544505 4631 scope.go:117] "RemoveContainer" containerID="adf289c7ed6772c8bf242408d1f93ee804fe13655763d22ac04eec6184e08d6a" Nov 29 04:31:23 crc kubenswrapper[4631]: I1129 04:31:23.813522 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerName="ceilometer-notification-agent" containerID="cri-o://ae3a0d81ae39a49fb920c6b1e172a23981afa1366441f0a3feacc58a766e9c20" gracePeriod=30 Nov 29 04:31:23 crc kubenswrapper[4631]: I1129 04:31:23.815405 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerName="sg-core" containerID="cri-o://6aafa51458bdfd083742597502d5cfbfc83b12e11f970844c626abb13f0be4f7" gracePeriod=30 Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.110191 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-fb967fbcd-pqplm"] Nov 29 04:31:24 crc kubenswrapper[4631]: W1129 04:31:24.343344 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7aced22_8f95_4c19_b6c6_f56a84ae29e0.slice/crio-d0566306c3add5bb0e9c6004258f2b6f24f702974186b36fcdea63f064f0477e WatchSource:0}: Error finding container d0566306c3add5bb0e9c6004258f2b6f24f702974186b36fcdea63f064f0477e: Status 404 returned error can't find the container with id d0566306c3add5bb0e9c6004258f2b6f24f702974186b36fcdea63f064f0477e Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.674971 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-55f844cf75-xrtmb" podUID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.151:5353: i/o timeout" Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.783307 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.845896 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" event={"ID":"f3ff1d2b-0fc8-49dc-a02f-948b81d54988","Type":"ContainerStarted","Data":"b0f4178ba22b0cae2ce946b23f727485d414bc0ad509a578833acd0a7dfcdae6"} Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.864545 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65dc8fcc6b-fns29" event={"ID":"125ef941-9d17-4b6e-b395-1deac81e80d8","Type":"ContainerStarted","Data":"56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2"} Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.885175 4631 generic.go:334] "Generic (PLEG): container finished" podID="1a245b27-d3b7-44b8-8680-d7381bf83f08" 
containerID="e516c6521dc3cb55b5f2a9cae60f33b26df85a9481672ab3e3f05105c4537381" exitCode=0 Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.885243 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" event={"ID":"1a245b27-d3b7-44b8-8680-d7381bf83f08","Type":"ContainerDied","Data":"e516c6521dc3cb55b5f2a9cae60f33b26df85a9481672ab3e3f05105c4537381"} Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.888986 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-777b8bd98c-blh5p" event={"ID":"e5880470-8751-4613-82c7-33efabd35a6e","Type":"ContainerStarted","Data":"1820870f91d23b4767bc43300eb5a6ac7367404e2a07a78ad3bda285b4ce855e"} Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.892660 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-fb967fbcd-pqplm" event={"ID":"f7aced22-8f95-4c19-b6c6-f56a84ae29e0","Type":"ContainerStarted","Data":"8b8bf23b8f1c985a29729f07538dcdf28004db6dfec92ef6e5f2ee7dba675579"} Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.892699 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-fb967fbcd-pqplm" event={"ID":"f7aced22-8f95-4c19-b6c6-f56a84ae29e0","Type":"ContainerStarted","Data":"d0566306c3add5bb0e9c6004258f2b6f24f702974186b36fcdea63f064f0477e"} Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.903263 4631 generic.go:334] "Generic (PLEG): container finished" podID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerID="6aafa51458bdfd083742597502d5cfbfc83b12e11f970844c626abb13f0be4f7" exitCode=2 Nov 29 04:31:24 crc kubenswrapper[4631]: I1129 04:31:24.903307 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"782a0b9b-d16f-495e-a648-e8a03af1e2d2","Type":"ContainerDied","Data":"6aafa51458bdfd083742597502d5cfbfc83b12e11f970844c626abb13f0be4f7"} Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.911811 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-777b8bd98c-blh5p" event={"ID":"e5880470-8751-4613-82c7-33efabd35a6e","Type":"ContainerStarted","Data":"c3965f0a67f1e2db324fb1720649e57ac7752a5f7712240f4b4d180b035abe30"} Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.915637 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-fb967fbcd-pqplm" event={"ID":"f7aced22-8f95-4c19-b6c6-f56a84ae29e0","Type":"ContainerStarted","Data":"f671209398db84d149e77f03864a8dbe7b9d1bab056f364bfd9935d67acf274a"} Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.916403 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.924496 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cztsz" event={"ID":"d646890b-5054-4ad5-9dc0-940a5e397fd0","Type":"ContainerStarted","Data":"ae7593444ad8c1b38ef9f29f4c11f00e074a6146749f2c448e6ca53e4d84d033"} Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.927187 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" event={"ID":"f3ff1d2b-0fc8-49dc-a02f-948b81d54988","Type":"ContainerStarted","Data":"8c45664c3d892fdf20a49328ad85daff376264e66f3039225224ba51f36096fc"} Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.930642 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65dc8fcc6b-fns29" 
event={"ID":"125ef941-9d17-4b6e-b395-1deac81e80d8","Type":"ContainerStarted","Data":"f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9"} Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.931122 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.931143 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.932301 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" event={"ID":"1a245b27-d3b7-44b8-8680-d7381bf83f08","Type":"ContainerStarted","Data":"403c2b0298a4cae2bb92f853395c2e218ecc714e12c1761ff2e9fa7eb921a3db"} Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.932470 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.942080 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-777b8bd98c-blh5p" podStartSLOduration=3.531864884 podStartE2EDuration="13.942064388s" podCreationTimestamp="2025-11-29 04:31:12 +0000 UTC" firstStartedPulling="2025-11-29 04:31:14.008994725 +0000 UTC m=+1201.073498239" lastFinishedPulling="2025-11-29 04:31:24.419194229 +0000 UTC m=+1211.483697743" observedRunningTime="2025-11-29 04:31:25.92864292 +0000 UTC m=+1212.993146434" watchObservedRunningTime="2025-11-29 04:31:25.942064388 +0000 UTC m=+1213.006567902" Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.959045 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-fb967fbcd-pqplm" podStartSLOduration=10.959027791 podStartE2EDuration="10.959027791s" podCreationTimestamp="2025-11-29 04:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:25.947543801 +0000 UTC m=+1213.012047315" watchObservedRunningTime="2025-11-29 04:31:25.959027791 +0000 UTC m=+1213.023531305" Nov 29 04:31:25 crc kubenswrapper[4631]: I1129 04:31:25.970922 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5555f74b94-58bwl" podStartSLOduration=3.6434485519999997 podStartE2EDuration="13.970903s" podCreationTimestamp="2025-11-29 04:31:12 +0000 UTC" firstStartedPulling="2025-11-29 04:31:14.117493469 +0000 UTC m=+1201.181996973" lastFinishedPulling="2025-11-29 04:31:24.444947907 +0000 UTC m=+1211.509451421" observedRunningTime="2025-11-29 04:31:25.967930188 +0000 UTC m=+1213.032433702" watchObservedRunningTime="2025-11-29 04:31:25.970903 +0000 UTC m=+1213.035406514" Nov 29 04:31:26 crc kubenswrapper[4631]: I1129 04:31:26.000137 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-cztsz" podStartSLOduration=6.204031903 podStartE2EDuration="1m13.000120603s" podCreationTimestamp="2025-11-29 04:30:13 +0000 UTC" firstStartedPulling="2025-11-29 04:30:16.737228537 +0000 UTC m=+1143.801732051" lastFinishedPulling="2025-11-29 04:31:23.533317237 +0000 UTC m=+1210.597820751" observedRunningTime="2025-11-29 04:31:25.982924034 +0000 UTC m=+1213.047427548" watchObservedRunningTime="2025-11-29 04:31:26.000120603 +0000 UTC m=+1213.064624117" Nov 29 04:31:26 crc kubenswrapper[4631]: I1129 04:31:26.026720 4631 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-65dc8fcc6b-fns29" podStartSLOduration=13.02669879 podStartE2EDuration="13.02669879s" podCreationTimestamp="2025-11-29 04:31:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:26.021634317 +0000 UTC m=+1213.086137851" watchObservedRunningTime="2025-11-29 04:31:26.02669879 +0000 UTC m=+1213.091202304" Nov 29 04:31:26 crc kubenswrapper[4631]: I1129 04:31:26.055504 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" podStartSLOduration=13.055489202 podStartE2EDuration="13.055489202s" podCreationTimestamp="2025-11-29 04:31:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:26.045274163 +0000 UTC m=+1213.109777677" watchObservedRunningTime="2025-11-29 04:31:26.055489202 +0000 UTC m=+1213.119992716" Nov 29 04:31:26 crc kubenswrapper[4631]: I1129 04:31:26.218665 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:28 crc kubenswrapper[4631]: I1129 04:31:28.965830 4631 generic.go:334] "Generic (PLEG): container finished" podID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerID="ae3a0d81ae39a49fb920c6b1e172a23981afa1366441f0a3feacc58a766e9c20" exitCode=0 Nov 29 04:31:28 crc kubenswrapper[4631]: I1129 04:31:28.965894 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"782a0b9b-d16f-495e-a648-e8a03af1e2d2","Type":"ContainerDied","Data":"ae3a0d81ae39a49fb920c6b1e172a23981afa1366441f0a3feacc58a766e9c20"} Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.154357 4631 util.go:48] "No ready sandbox for pod can be found. 
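The startup-latency entries above fit a simple relationship: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration further subtracts image-pull time (lastFinishedPulling minus firstStartedPulling). For barbican-worker-777b8bd98c-blh5p: pull time = 04:31:24.419194229 − 04:31:14.008994725 = 10.410199504 s, and 13.942064388 − 10.410199504 = 3.531864884 s, exactly the logged podStartSLOduration. Pods whose pull timestamps are zero-valued ("0001-01-01 00:00:00 +0000 UTC") needed no pull, so their SLO and E2E durations coincide, as for barbican-api-65dc8fcc6b-fns29 and dnsmasq-dns-85ff748b95-klnj5.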
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.272182 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-scripts\") pod \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.272303 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-run-httpd\") pod \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.272404 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-log-httpd\") pod \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.272654 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-sg-core-conf-yaml\") pod \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.272694 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "782a0b9b-d16f-495e-a648-e8a03af1e2d2" (UID: "782a0b9b-d16f-495e-a648-e8a03af1e2d2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.272734 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-config-data\") pod \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.272785 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nw56\" (UniqueName: \"kubernetes.io/projected/782a0b9b-d16f-495e-a648-e8a03af1e2d2-kube-api-access-9nw56\") pod \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.272842 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-combined-ca-bundle\") pod \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\" (UID: \"782a0b9b-d16f-495e-a648-e8a03af1e2d2\") " Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.272961 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "782a0b9b-d16f-495e-a648-e8a03af1e2d2" (UID: "782a0b9b-d16f-495e-a648-e8a03af1e2d2"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.273511 4631 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.273545 4631 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/782a0b9b-d16f-495e-a648-e8a03af1e2d2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.278684 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-scripts" (OuterVolumeSpecName: "scripts") pod "782a0b9b-d16f-495e-a648-e8a03af1e2d2" (UID: "782a0b9b-d16f-495e-a648-e8a03af1e2d2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.289491 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/782a0b9b-d16f-495e-a648-e8a03af1e2d2-kube-api-access-9nw56" (OuterVolumeSpecName: "kube-api-access-9nw56") pod "782a0b9b-d16f-495e-a648-e8a03af1e2d2" (UID: "782a0b9b-d16f-495e-a648-e8a03af1e2d2"). InnerVolumeSpecName "kube-api-access-9nw56". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.306123 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-config-data" (OuterVolumeSpecName: "config-data") pod "782a0b9b-d16f-495e-a648-e8a03af1e2d2" (UID: "782a0b9b-d16f-495e-a648-e8a03af1e2d2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.308752 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "782a0b9b-d16f-495e-a648-e8a03af1e2d2" (UID: "782a0b9b-d16f-495e-a648-e8a03af1e2d2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.312222 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "782a0b9b-d16f-495e-a648-e8a03af1e2d2" (UID: "782a0b9b-d16f-495e-a648-e8a03af1e2d2"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.378797 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.379098 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nw56\" (UniqueName: \"kubernetes.io/projected/782a0b9b-d16f-495e-a648-e8a03af1e2d2-kube-api-access-9nw56\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.379287 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.379642 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.379821 4631 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/782a0b9b-d16f-495e-a648-e8a03af1e2d2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.986789 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"782a0b9b-d16f-495e-a648-e8a03af1e2d2","Type":"ContainerDied","Data":"e53ab7793075cb925762e89dfacc1068f15674e180599c6b79ff47823ed6dfe1"} Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.986864 4631 scope.go:117] "RemoveContainer" containerID="6aafa51458bdfd083742597502d5cfbfc83b12e11f970844c626abb13f0be4f7" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.987061 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.992954 4631 generic.go:334] "Generic (PLEG): container finished" podID="d646890b-5054-4ad5-9dc0-940a5e397fd0" containerID="ae7593444ad8c1b38ef9f29f4c11f00e074a6146749f2c448e6ca53e4d84d033" exitCode=0 Nov 29 04:31:29 crc kubenswrapper[4631]: I1129 04:31:29.993059 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cztsz" event={"ID":"d646890b-5054-4ad5-9dc0-940a5e397fd0","Type":"ContainerDied","Data":"ae7593444ad8c1b38ef9f29f4c11f00e074a6146749f2c448e6ca53e4d84d033"} Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.029237 4631 scope.go:117] "RemoveContainer" containerID="ae3a0d81ae39a49fb920c6b1e172a23981afa1366441f0a3feacc58a766e9c20" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.093950 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.119155 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.128998 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:31:30 crc kubenswrapper[4631]: E1129 04:31:30.129347 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerName="init" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.129359 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerName="init" Nov 29 04:31:30 crc kubenswrapper[4631]: E1129 04:31:30.129379 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerName="dnsmasq-dns" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.129385 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerName="dnsmasq-dns" Nov 29 04:31:30 crc kubenswrapper[4631]: E1129 04:31:30.129396 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerName="ceilometer-notification-agent" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.129404 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerName="ceilometer-notification-agent" Nov 29 04:31:30 crc kubenswrapper[4631]: E1129 04:31:30.129420 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerName="sg-core" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.129425 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerName="sg-core" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.129597 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerName="ceilometer-notification-agent" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.129612 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" containerName="sg-core" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.129628 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fcf4605-b6e4-4d37-9cbf-6f1daa0f32d6" containerName="dnsmasq-dns" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.133233 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.136136 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.138125 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.149395 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.193677 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.193754 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-run-httpd\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.193797 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-config-data\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.193881 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-scripts\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.193905 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.193969 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-log-httpd\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.194017 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xndsh\" (UniqueName: \"kubernetes.io/projected/8bda5459-96f0-4bc8-8db3-31d78e6ae551-kube-api-access-xndsh\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.295121 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-scripts\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.295550 4631 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.296756 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-log-httpd\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.297208 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xndsh\" (UniqueName: \"kubernetes.io/projected/8bda5459-96f0-4bc8-8db3-31d78e6ae551-kube-api-access-xndsh\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.297522 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.297580 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-log-httpd\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.297974 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-run-httpd\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.300679 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-config-data\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.301469 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.298591 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-run-httpd\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.301558 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.304878 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-scripts\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.323255 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-config-data\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.333855 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xndsh\" (UniqueName: \"kubernetes.io/projected/8bda5459-96f0-4bc8-8db3-31d78e6ae551-kube-api-access-xndsh\") pod \"ceilometer-0\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") " pod="openstack/ceilometer-0" Nov 29 04:31:30 crc kubenswrapper[4631]: I1129 04:31:30.464408 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.033383 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.049932 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7d7446b849-nsq65" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.136929 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f5f7c4c6d-j949k"] Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.137175 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f5f7c4c6d-j949k" podUID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerName="neutron-api" containerID="cri-o://ad95cea063e9d485d805de7dd1ad46aaf52c15e5c59f83e5f29be1e83c4ecbfc" gracePeriod=30 Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.137819 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f5f7c4c6d-j949k" podUID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerName="neutron-httpd" containerID="cri-o://cd7994f5257d63f63b3dec09c5413ad86d9a8349949a5d6affeb80dcec240d51" gracePeriod=30 Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.228516 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="782a0b9b-d16f-495e-a648-e8a03af1e2d2" path="/var/lib/kubelet/pods/782a0b9b-d16f-495e-a648-e8a03af1e2d2/volumes" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.508910 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-cztsz" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.631376 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-scripts\") pod \"d646890b-5054-4ad5-9dc0-940a5e397fd0\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.631456 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2fgr\" (UniqueName: \"kubernetes.io/projected/d646890b-5054-4ad5-9dc0-940a5e397fd0-kube-api-access-m2fgr\") pod \"d646890b-5054-4ad5-9dc0-940a5e397fd0\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.631492 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-combined-ca-bundle\") pod \"d646890b-5054-4ad5-9dc0-940a5e397fd0\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.632229 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-db-sync-config-data\") pod \"d646890b-5054-4ad5-9dc0-940a5e397fd0\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.632268 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-config-data\") pod \"d646890b-5054-4ad5-9dc0-940a5e397fd0\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.632346 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d646890b-5054-4ad5-9dc0-940a5e397fd0-etc-machine-id\") pod \"d646890b-5054-4ad5-9dc0-940a5e397fd0\" (UID: \"d646890b-5054-4ad5-9dc0-940a5e397fd0\") " Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.632718 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d646890b-5054-4ad5-9dc0-940a5e397fd0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d646890b-5054-4ad5-9dc0-940a5e397fd0" (UID: "d646890b-5054-4ad5-9dc0-940a5e397fd0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.638764 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-scripts" (OuterVolumeSpecName: "scripts") pod "d646890b-5054-4ad5-9dc0-940a5e397fd0" (UID: "d646890b-5054-4ad5-9dc0-940a5e397fd0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.643476 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d646890b-5054-4ad5-9dc0-940a5e397fd0" (UID: "d646890b-5054-4ad5-9dc0-940a5e397fd0"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.667538 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d646890b-5054-4ad5-9dc0-940a5e397fd0-kube-api-access-m2fgr" (OuterVolumeSpecName: "kube-api-access-m2fgr") pod "d646890b-5054-4ad5-9dc0-940a5e397fd0" (UID: "d646890b-5054-4ad5-9dc0-940a5e397fd0"). InnerVolumeSpecName "kube-api-access-m2fgr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.719156 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d646890b-5054-4ad5-9dc0-940a5e397fd0" (UID: "d646890b-5054-4ad5-9dc0-940a5e397fd0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.734595 4631 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d646890b-5054-4ad5-9dc0-940a5e397fd0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.734623 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.734635 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2fgr\" (UniqueName: \"kubernetes.io/projected/d646890b-5054-4ad5-9dc0-940a5e397fd0-kube-api-access-m2fgr\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.734644 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.734654 4631 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.753426 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-config-data" (OuterVolumeSpecName: "config-data") pod "d646890b-5054-4ad5-9dc0-940a5e397fd0" (UID: "d646890b-5054-4ad5-9dc0-940a5e397fd0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:31 crc kubenswrapper[4631]: I1129 04:31:31.838691 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d646890b-5054-4ad5-9dc0-940a5e397fd0-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.034799 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-cztsz" event={"ID":"d646890b-5054-4ad5-9dc0-940a5e397fd0","Type":"ContainerDied","Data":"7cf43755c2614aab77515d1114e13f2829d88ef66924e856dc8015272e6c6ef8"} Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.035023 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cf43755c2614aab77515d1114e13f2829d88ef66924e856dc8015272e6c6ef8" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.034846 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-cztsz" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.039143 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerStarted","Data":"7b4a60b5e6c2f5bae26b271a31f1a2187daa529fae4697a12e9f29516f21a7c7"} Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.039205 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerStarted","Data":"2586cecc14ed3c45bd906f1fa4e3a4d17f8075e579444038412e69f3aa27409a"} Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.043499 4631 generic.go:334] "Generic (PLEG): container finished" podID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerID="cd7994f5257d63f63b3dec09c5413ad86d9a8349949a5d6affeb80dcec240d51" exitCode=0 Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.043539 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f5f7c4c6d-j949k" event={"ID":"61789a4f-3d8f-44c8-b8ea-5d43da626439","Type":"ContainerDied","Data":"cd7994f5257d63f63b3dec09c5413ad86d9a8349949a5d6affeb80dcec240d51"} Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.270394 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 04:31:32 crc kubenswrapper[4631]: E1129 04:31:32.291749 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d646890b-5054-4ad5-9dc0-940a5e397fd0" containerName="cinder-db-sync" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.291781 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="d646890b-5054-4ad5-9dc0-940a5e397fd0" containerName="cinder-db-sync" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.293163 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="d646890b-5054-4ad5-9dc0-940a5e397fd0" containerName="cinder-db-sync" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.310042 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.316744 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.316972 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.317236 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.317467 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-7dnj9" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.377468 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fb9229ec-b7be-4f22-86c1-410644b922ff-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.377508 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-scripts\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.377547 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.377621 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp8pz\" (UniqueName: \"kubernetes.io/projected/fb9229ec-b7be-4f22-86c1-410644b922ff-kube-api-access-cp8pz\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.377677 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.377701 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.474446 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.478237 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fb9229ec-b7be-4f22-86c1-410644b922ff-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " 
pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.478274 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-scripts\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.478301 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.478368 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp8pz\" (UniqueName: \"kubernetes.io/projected/fb9229ec-b7be-4f22-86c1-410644b922ff-kube-api-access-cp8pz\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.478416 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.478435 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.483118 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fb9229ec-b7be-4f22-86c1-410644b922ff-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.498388 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-klnj5"] Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.498663 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" podUID="1a245b27-d3b7-44b8-8680-d7381bf83f08" containerName="dnsmasq-dns" containerID="cri-o://403c2b0298a4cae2bb92f853395c2e218ecc714e12c1761ff2e9fa7eb921a3db" gracePeriod=10 Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.507124 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.522669 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.524348 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.524891 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp8pz\" (UniqueName: \"kubernetes.io/projected/fb9229ec-b7be-4f22-86c1-410644b922ff-kube-api-access-cp8pz\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.527577 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.538830 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-scripts\") pod \"cinder-scheduler-0\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.565548 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-hhxk2"] Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.567097 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.603662 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-hhxk2"] Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.658797 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.659348 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.661295 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.684833 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685110 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccsqg\" (UniqueName: \"kubernetes.io/projected/ba55533e-c673-440d-a30d-30af0b3a64be-kube-api-access-ccsqg\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685129 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba55533e-c673-440d-a30d-30af0b3a64be-logs\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685151 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data-custom\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685177 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-scripts\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685201 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685217 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685233 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685279 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba55533e-c673-440d-a30d-30af0b3a64be-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc 
kubenswrapper[4631]: I1129 04:31:32.685308 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-config\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685364 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gncpn\" (UniqueName: \"kubernetes.io/projected/108ade1d-ac82-4183-99eb-761b54886da9-kube-api-access-gncpn\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685396 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.685415 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: E1129 04:31:32.688661 4631 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a245b27_d3b7_44b8_8680_d7381bf83f08.slice/crio-conmon-403c2b0298a4cae2bb92f853395c2e218ecc714e12c1761ff2e9fa7eb921a3db.scope\": RecentStats: unable to find data in memory cache]" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.689657 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.708105 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790410 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba55533e-c673-440d-a30d-30af0b3a64be-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790477 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-config\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790525 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gncpn\" (UniqueName: \"kubernetes.io/projected/108ade1d-ac82-4183-99eb-761b54886da9-kube-api-access-gncpn\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790566 4631 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790584 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790615 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790639 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccsqg\" (UniqueName: \"kubernetes.io/projected/ba55533e-c673-440d-a30d-30af0b3a64be-kube-api-access-ccsqg\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790657 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba55533e-c673-440d-a30d-30af0b3a64be-logs\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790677 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data-custom\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790705 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-scripts\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790733 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790751 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.790768 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " 
pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.799982 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.800442 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.801094 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-config\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.801360 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.801651 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.801691 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba55533e-c673-440d-a30d-30af0b3a64be-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.801928 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba55533e-c673-440d-a30d-30af0b3a64be-logs\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.815624 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.828134 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data-custom\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.832639 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gncpn\" (UniqueName: \"kubernetes.io/projected/108ade1d-ac82-4183-99eb-761b54886da9-kube-api-access-gncpn\") pod 
\"dnsmasq-dns-5c9776ccc5-hhxk2\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.834160 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.835401 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-scripts\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:32 crc kubenswrapper[4631]: I1129 04:31:32.841890 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccsqg\" (UniqueName: \"kubernetes.io/projected/ba55533e-c673-440d-a30d-30af0b3a64be-kube-api-access-ccsqg\") pod \"cinder-api-0\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " pod="openstack/cinder-api-0" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.039293 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.057160 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.130627 4631 generic.go:334] "Generic (PLEG): container finished" podID="1a245b27-d3b7-44b8-8680-d7381bf83f08" containerID="403c2b0298a4cae2bb92f853395c2e218ecc714e12c1761ff2e9fa7eb921a3db" exitCode=0 Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.130681 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" event={"ID":"1a245b27-d3b7-44b8-8680-d7381bf83f08","Type":"ContainerDied","Data":"403c2b0298a4cae2bb92f853395c2e218ecc714e12c1761ff2e9fa7eb921a3db"} Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.150718 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerStarted","Data":"84f1737cad87a2039fb73be53ae3acf724c65031dba2daf4e44fc70238c53ab4"} Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.244224 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.377268 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.416718 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-swift-storage-0\") pod \"1a245b27-d3b7-44b8-8680-d7381bf83f08\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.416750 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-svc\") pod \"1a245b27-d3b7-44b8-8680-d7381bf83f08\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.416770 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-nb\") pod \"1a245b27-d3b7-44b8-8680-d7381bf83f08\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.416829 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-config\") pod \"1a245b27-d3b7-44b8-8680-d7381bf83f08\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.416903 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rr4w\" (UniqueName: \"kubernetes.io/projected/1a245b27-d3b7-44b8-8680-d7381bf83f08-kube-api-access-6rr4w\") pod \"1a245b27-d3b7-44b8-8680-d7381bf83f08\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.416997 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-sb\") pod \"1a245b27-d3b7-44b8-8680-d7381bf83f08\" (UID: \"1a245b27-d3b7-44b8-8680-d7381bf83f08\") " Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.433503 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a245b27-d3b7-44b8-8680-d7381bf83f08-kube-api-access-6rr4w" (OuterVolumeSpecName: "kube-api-access-6rr4w") pod "1a245b27-d3b7-44b8-8680-d7381bf83f08" (UID: "1a245b27-d3b7-44b8-8680-d7381bf83f08"). InnerVolumeSpecName "kube-api-access-6rr4w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.519131 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rr4w\" (UniqueName: \"kubernetes.io/projected/1a245b27-d3b7-44b8-8680-d7381bf83f08-kube-api-access-6rr4w\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.614812 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1a245b27-d3b7-44b8-8680-d7381bf83f08" (UID: "1a245b27-d3b7-44b8-8680-d7381bf83f08"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.648213 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.653882 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-config" (OuterVolumeSpecName: "config") pod "1a245b27-d3b7-44b8-8680-d7381bf83f08" (UID: "1a245b27-d3b7-44b8-8680-d7381bf83f08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.668599 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1a245b27-d3b7-44b8-8680-d7381bf83f08" (UID: "1a245b27-d3b7-44b8-8680-d7381bf83f08"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.680385 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1a245b27-d3b7-44b8-8680-d7381bf83f08" (UID: "1a245b27-d3b7-44b8-8680-d7381bf83f08"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.686877 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1a245b27-d3b7-44b8-8680-d7381bf83f08" (UID: "1a245b27-d3b7-44b8-8680-d7381bf83f08"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.750503 4631 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.750534 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.750546 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.750557 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a245b27-d3b7-44b8-8680-d7381bf83f08-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:33 crc kubenswrapper[4631]: I1129 04:31:33.766678 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.015352 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-hhxk2"] Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.236576 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" event={"ID":"108ade1d-ac82-4183-99eb-761b54886da9","Type":"ContainerStarted","Data":"070e7fac880aa8741df152b03cdb1c69206949170b1aebf90f70213e87295718"} Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.252013 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" event={"ID":"1a245b27-d3b7-44b8-8680-d7381bf83f08","Type":"ContainerDied","Data":"50824be3ab908e9d608a8a6ab27155a5b1741167da849169fc08887dfea9a12d"} Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.252065 4631 scope.go:117] "RemoveContainer" containerID="403c2b0298a4cae2bb92f853395c2e218ecc714e12c1761ff2e9fa7eb921a3db" Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.252199 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-klnj5" Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.310005 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fb9229ec-b7be-4f22-86c1-410644b922ff","Type":"ContainerStarted","Data":"4906b243cf87c1be2bb910f0d64d85d54b18f6389f3c37be54542895133ab14a"} Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.401387 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerStarted","Data":"40f4aaa9a4abd911f787e56f8b1ec402522c4a18baf37b3d5c66bc129c19edcd"} Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.431392 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-klnj5"] Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.471681 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba55533e-c673-440d-a30d-30af0b3a64be","Type":"ContainerStarted","Data":"d43e753bdbd3381c27972acdc43657ec7f764f3b4d707b49b79211c3a541cc34"} Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.507809 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-klnj5"] Nov 29 04:31:34 crc kubenswrapper[4631]: I1129 04:31:34.528304 4631 scope.go:117] "RemoveContainer" containerID="e516c6521dc3cb55b5f2a9cae60f33b26df85a9481672ab3e3f05105c4537381" Nov 29 04:31:35 crc kubenswrapper[4631]: I1129 04:31:35.243523 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a245b27-d3b7-44b8-8680-d7381bf83f08" path="/var/lib/kubelet/pods/1a245b27-d3b7-44b8-8680-d7381bf83f08/volumes" Nov 29 04:31:35 crc kubenswrapper[4631]: I1129 04:31:35.497627 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba55533e-c673-440d-a30d-30af0b3a64be","Type":"ContainerStarted","Data":"7c655d625cd08b7b16cb7ad17af3bda74f23df0a263a18d1f797cb6f91d4930c"} Nov 29 04:31:35 crc kubenswrapper[4631]: I1129 04:31:35.498738 4631 generic.go:334] "Generic (PLEG): container finished" podID="108ade1d-ac82-4183-99eb-761b54886da9" containerID="1a5362cd150902794571cc7f4ea2f956239a31696486791db9726e59a3d22aec" exitCode=0 Nov 29 04:31:35 crc kubenswrapper[4631]: I1129 04:31:35.498781 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" event={"ID":"108ade1d-ac82-4183-99eb-761b54886da9","Type":"ContainerDied","Data":"1a5362cd150902794571cc7f4ea2f956239a31696486791db9726e59a3d22aec"} Nov 29 04:31:35 crc kubenswrapper[4631]: I1129 04:31:35.675664 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:35 crc kubenswrapper[4631]: I1129 04:31:35.677488 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.241883 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-fb967fbcd-pqplm" podUID="f7aced22-8f95-4c19-b6c6-f56a84ae29e0" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.241918 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-fb967fbcd-pqplm" podUID="f7aced22-8f95-4c19-b6c6-f56a84ae29e0" 
containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.242003 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-fb967fbcd-pqplm" podUID="f7aced22-8f95-4c19-b6c6-f56a84ae29e0" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.531822 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fb9229ec-b7be-4f22-86c1-410644b922ff","Type":"ContainerStarted","Data":"270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348"} Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.536616 4631 generic.go:334] "Generic (PLEG): container finished" podID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerID="ad95cea063e9d485d805de7dd1ad46aaf52c15e5c59f83e5f29be1e83c4ecbfc" exitCode=0 Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.536677 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f5f7c4c6d-j949k" event={"ID":"61789a4f-3d8f-44c8-b8ea-5d43da626439","Type":"ContainerDied","Data":"ad95cea063e9d485d805de7dd1ad46aaf52c15e5c59f83e5f29be1e83c4ecbfc"} Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.549092 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerStarted","Data":"ff386d72ddff2c18deb37636dfd1679efb1f65106c01a1c516266f6bb05e1a31"} Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.550218 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.557815 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" event={"ID":"108ade1d-ac82-4183-99eb-761b54886da9","Type":"ContainerStarted","Data":"a5fbcc4c5d1d2bfb9e698ae2a45f79c748484820fe6b9822f2930f1aea302732"} Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.558376 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.579939 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.211295912 podStartE2EDuration="6.579921231s" podCreationTimestamp="2025-11-29 04:31:30 +0000 UTC" firstStartedPulling="2025-11-29 04:31:31.044608025 +0000 UTC m=+1218.109111549" lastFinishedPulling="2025-11-29 04:31:35.413233354 +0000 UTC m=+1222.477736868" observedRunningTime="2025-11-29 04:31:36.573203267 +0000 UTC m=+1223.637706781" watchObservedRunningTime="2025-11-29 04:31:36.579921231 +0000 UTC m=+1223.644424745" Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.607497 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" podStartSLOduration=4.607475633 podStartE2EDuration="4.607475633s" podCreationTimestamp="2025-11-29 04:31:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:36.592810615 +0000 UTC m=+1223.657314129" watchObservedRunningTime="2025-11-29 04:31:36.607475633 
+0000 UTC m=+1223.671979147" Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.815544 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:36 crc kubenswrapper[4631]: I1129 04:31:36.815610 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.594953 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fb9229ec-b7be-4f22-86c1-410644b922ff","Type":"ContainerStarted","Data":"4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac"} Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.602738 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f5f7c4c6d-j949k" event={"ID":"61789a4f-3d8f-44c8-b8ea-5d43da626439","Type":"ContainerDied","Data":"90a5fb10ab91b1634b133da54f98fa13401d6aa2696e4adb7fb41a846f8f9032"} Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.602772 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90a5fb10ab91b1634b133da54f98fa13401d6aa2696e4adb7fb41a846f8f9032" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.604821 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ba55533e-c673-440d-a30d-30af0b3a64be" containerName="cinder-api-log" containerID="cri-o://7c655d625cd08b7b16cb7ad17af3bda74f23df0a263a18d1f797cb6f91d4930c" gracePeriod=30 Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.604863 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba55533e-c673-440d-a30d-30af0b3a64be","Type":"ContainerStarted","Data":"a30d4c7316ad1d9540f57c2b445b841f7711cec9a14cf310cba7a57566c0a82c"} Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.607882 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.604978 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ba55533e-c673-440d-a30d-30af0b3a64be" containerName="cinder-api" containerID="cri-o://a30d4c7316ad1d9540f57c2b445b841f7711cec9a14cf310cba7a57566c0a82c" gracePeriod=30 Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.618097 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.940551818 podStartE2EDuration="5.618084845s" podCreationTimestamp="2025-11-29 04:31:32 +0000 UTC" firstStartedPulling="2025-11-29 04:31:33.398509809 +0000 UTC m=+1220.463013323" lastFinishedPulling="2025-11-29 04:31:35.076042836 +0000 UTC m=+1222.140546350" observedRunningTime="2025-11-29 04:31:37.61498301 +0000 UTC m=+1224.679486524" watchObservedRunningTime="2025-11-29 04:31:37.618084845 +0000 UTC m=+1224.682588359" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.640808 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
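In the "Observed pod startup duration" entries above, podStartE2EDuration is the wall-clock gap between podCreationTimestamp and watchObservedRunningTime; pods that pulled no image report the Go zero time ("0001-01-01 00:00:00 +0000 UTC") for the pulling fields. A small sketch checking that arithmetic against the dnsmasq-dns-5c9776ccc5-hhxk2 entry (assumed semantics, not kubelet code; the timestamps are copied from the log):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Layout matching the timestamps printed in the log entries.
    	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    	created, _ := time.Parse(layout, "2025-11-29 04:31:32 +0000 UTC")
    	running, _ := time.Parse(layout, "2025-11-29 04:31:36.607475633 +0000 UTC")
    	// Prints 4.607475633s, matching podStartE2EDuration="4.607475633s".
    	fmt.Println(running.Sub(created))
    }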
pod="openstack/cinder-api-0" podStartSLOduration=5.640789789 podStartE2EDuration="5.640789789s" podCreationTimestamp="2025-11-29 04:31:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:37.636518674 +0000 UTC m=+1224.701022188" watchObservedRunningTime="2025-11-29 04:31:37.640789789 +0000 UTC m=+1224.705293303" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.659679 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.704917 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.814085 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-config\") pod \"61789a4f-3d8f-44c8-b8ea-5d43da626439\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.814153 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-ovndb-tls-certs\") pod \"61789a4f-3d8f-44c8-b8ea-5d43da626439\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.814259 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-httpd-config\") pod \"61789a4f-3d8f-44c8-b8ea-5d43da626439\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.814277 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccb7\" (UniqueName: \"kubernetes.io/projected/61789a4f-3d8f-44c8-b8ea-5d43da626439-kube-api-access-6ccb7\") pod \"61789a4f-3d8f-44c8-b8ea-5d43da626439\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.814379 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-combined-ca-bundle\") pod \"61789a4f-3d8f-44c8-b8ea-5d43da626439\" (UID: \"61789a4f-3d8f-44c8-b8ea-5d43da626439\") " Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.843810 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "61789a4f-3d8f-44c8-b8ea-5d43da626439" (UID: "61789a4f-3d8f-44c8-b8ea-5d43da626439"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.862318 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61789a4f-3d8f-44c8-b8ea-5d43da626439-kube-api-access-6ccb7" (OuterVolumeSpecName: "kube-api-access-6ccb7") pod "61789a4f-3d8f-44c8-b8ea-5d43da626439" (UID: "61789a4f-3d8f-44c8-b8ea-5d43da626439"). InnerVolumeSpecName "kube-api-access-6ccb7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.918740 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-config" (OuterVolumeSpecName: "config") pod "61789a4f-3d8f-44c8-b8ea-5d43da626439" (UID: "61789a4f-3d8f-44c8-b8ea-5d43da626439"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.919760 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.919792 4631 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.919802 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccb7\" (UniqueName: \"kubernetes.io/projected/61789a4f-3d8f-44c8-b8ea-5d43da626439-kube-api-access-6ccb7\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.927456 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "61789a4f-3d8f-44c8-b8ea-5d43da626439" (UID: "61789a4f-3d8f-44c8-b8ea-5d43da626439"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:37 crc kubenswrapper[4631]: I1129 04:31:37.966233 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "61789a4f-3d8f-44c8-b8ea-5d43da626439" (UID: "61789a4f-3d8f-44c8-b8ea-5d43da626439"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.021233 4631 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.021260 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61789a4f-3d8f-44c8-b8ea-5d43da626439-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.149639 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.149719 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.150427 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"8b44c75824934f7b87d14cf3fa1b963da97caaa8b8cf8a2df430157835986df1"} pod="openstack/horizon-76fdc69464-qvs2b" containerMessage="Container horizon failed startup probe, will be restarted" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.150463 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" containerID="cri-o://8b44c75824934f7b87d14cf3fa1b963da97caaa8b8cf8a2df430157835986df1" gracePeriod=30 Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.273554 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5964d597b6-rfcr2" podUID="e2a6410f-6c69-4b87-a247-b285aef98b71" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.273629 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.274352 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"cbbb99ecfc0bc493701045852b7269c4bcff1a9e60435681b409c45bbed38032"} pod="openstack/horizon-5964d597b6-rfcr2" containerMessage="Container horizon failed startup probe, will be restarted" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.274383 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5964d597b6-rfcr2" podUID="e2a6410f-6c69-4b87-a247-b285aef98b71" containerName="horizon" containerID="cri-o://cbbb99ecfc0bc493701045852b7269c4bcff1a9e60435681b409c45bbed38032" gracePeriod=30 Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.411607 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.623959 4631 generic.go:334] "Generic (PLEG): container finished" 
podID="ba55533e-c673-440d-a30d-30af0b3a64be" containerID="a30d4c7316ad1d9540f57c2b445b841f7711cec9a14cf310cba7a57566c0a82c" exitCode=0 Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.624254 4631 generic.go:334] "Generic (PLEG): container finished" podID="ba55533e-c673-440d-a30d-30af0b3a64be" containerID="7c655d625cd08b7b16cb7ad17af3bda74f23df0a263a18d1f797cb6f91d4930c" exitCode=143 Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.624395 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f5f7c4c6d-j949k" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.624925 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba55533e-c673-440d-a30d-30af0b3a64be","Type":"ContainerDied","Data":"a30d4c7316ad1d9540f57c2b445b841f7711cec9a14cf310cba7a57566c0a82c"} Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.624978 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba55533e-c673-440d-a30d-30af0b3a64be","Type":"ContainerDied","Data":"7c655d625cd08b7b16cb7ad17af3bda74f23df0a263a18d1f797cb6f91d4930c"} Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.699467 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.715135 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f5f7c4c6d-j949k"] Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.725765 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7f5f7c4c6d-j949k"] Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.735414 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data-custom\") pod \"ba55533e-c673-440d-a30d-30af0b3a64be\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.735650 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba55533e-c673-440d-a30d-30af0b3a64be-logs\") pod \"ba55533e-c673-440d-a30d-30af0b3a64be\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.735752 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-scripts\") pod \"ba55533e-c673-440d-a30d-30af0b3a64be\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.735841 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-combined-ca-bundle\") pod \"ba55533e-c673-440d-a30d-30af0b3a64be\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.735936 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccsqg\" (UniqueName: \"kubernetes.io/projected/ba55533e-c673-440d-a30d-30af0b3a64be-kube-api-access-ccsqg\") pod \"ba55533e-c673-440d-a30d-30af0b3a64be\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.736003 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data\") pod \"ba55533e-c673-440d-a30d-30af0b3a64be\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.736107 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba55533e-c673-440d-a30d-30af0b3a64be-etc-machine-id\") pod \"ba55533e-c673-440d-a30d-30af0b3a64be\" (UID: \"ba55533e-c673-440d-a30d-30af0b3a64be\") " Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.735960 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba55533e-c673-440d-a30d-30af0b3a64be-logs" (OuterVolumeSpecName: "logs") pod "ba55533e-c673-440d-a30d-30af0b3a64be" (UID: "ba55533e-c673-440d-a30d-30af0b3a64be"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.737196 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ba55533e-c673-440d-a30d-30af0b3a64be-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ba55533e-c673-440d-a30d-30af0b3a64be" (UID: "ba55533e-c673-440d-a30d-30af0b3a64be"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.772200 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.773114 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ba55533e-c673-440d-a30d-30af0b3a64be" (UID: "ba55533e-c673-440d-a30d-30af0b3a64be"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.779499 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba55533e-c673-440d-a30d-30af0b3a64be-kube-api-access-ccsqg" (OuterVolumeSpecName: "kube-api-access-ccsqg") pod "ba55533e-c673-440d-a30d-30af0b3a64be" (UID: "ba55533e-c673-440d-a30d-30af0b3a64be"). InnerVolumeSpecName "kube-api-access-ccsqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.791317 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-scripts" (OuterVolumeSpecName: "scripts") pod "ba55533e-c673-440d-a30d-30af0b3a64be" (UID: "ba55533e-c673-440d-a30d-30af0b3a64be"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.809739 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.820940 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba55533e-c673-440d-a30d-30af0b3a64be" (UID: "ba55533e-c673-440d-a30d-30af0b3a64be"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.840829 4631 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.840857 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba55533e-c673-440d-a30d-30af0b3a64be-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.840866 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.840875 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.840883 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccsqg\" (UniqueName: \"kubernetes.io/projected/ba55533e-c673-440d-a30d-30af0b3a64be-kube-api-access-ccsqg\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.840892 4631 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ba55533e-c673-440d-a30d-30af0b3a64be-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.882515 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data" (OuterVolumeSpecName: "config-data") pod "ba55533e-c673-440d-a30d-30af0b3a64be" (UID: "ba55533e-c673-440d-a30d-30af0b3a64be"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:38 crc kubenswrapper[4631]: I1129 04:31:38.944328 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba55533e-c673-440d-a30d-30af0b3a64be-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.225634 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61789a4f-3d8f-44c8-b8ea-5d43da626439" path="/var/lib/kubelet/pods/61789a4f-3d8f-44c8-b8ea-5d43da626439/volumes" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.634729 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ba55533e-c673-440d-a30d-30af0b3a64be","Type":"ContainerDied","Data":"d43e753bdbd3381c27972acdc43657ec7f764f3b4d707b49b79211c3a541cc34"} Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.634752 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.634802 4631 scope.go:117] "RemoveContainer" containerID="a30d4c7316ad1d9540f57c2b445b841f7711cec9a14cf310cba7a57566c0a82c" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.654689 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.660401 4631 scope.go:117] "RemoveContainer" containerID="7c655d625cd08b7b16cb7ad17af3bda74f23df0a263a18d1f797cb6f91d4930c" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.679693 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.691616 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 29 04:31:39 crc kubenswrapper[4631]: E1129 04:31:39.691937 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerName="neutron-httpd" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.691949 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerName="neutron-httpd" Nov 29 04:31:39 crc kubenswrapper[4631]: E1129 04:31:39.691960 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba55533e-c673-440d-a30d-30af0b3a64be" containerName="cinder-api-log" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.691966 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba55533e-c673-440d-a30d-30af0b3a64be" containerName="cinder-api-log" Nov 29 04:31:39 crc kubenswrapper[4631]: E1129 04:31:39.691981 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerName="neutron-api" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.691988 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerName="neutron-api" Nov 29 04:31:39 crc kubenswrapper[4631]: E1129 04:31:39.692005 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a245b27-d3b7-44b8-8680-d7381bf83f08" containerName="init" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.692011 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a245b27-d3b7-44b8-8680-d7381bf83f08" containerName="init" Nov 29 04:31:39 crc kubenswrapper[4631]: E1129 04:31:39.692022 4631 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ba55533e-c673-440d-a30d-30af0b3a64be" containerName="cinder-api" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.692028 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba55533e-c673-440d-a30d-30af0b3a64be" containerName="cinder-api" Nov 29 04:31:39 crc kubenswrapper[4631]: E1129 04:31:39.692045 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a245b27-d3b7-44b8-8680-d7381bf83f08" containerName="dnsmasq-dns" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.692050 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a245b27-d3b7-44b8-8680-d7381bf83f08" containerName="dnsmasq-dns" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.692309 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba55533e-c673-440d-a30d-30af0b3a64be" containerName="cinder-api-log" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.692323 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerName="neutron-httpd" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.692397 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a245b27-d3b7-44b8-8680-d7381bf83f08" containerName="dnsmasq-dns" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.692411 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="61789a4f-3d8f-44c8-b8ea-5d43da626439" containerName="neutron-api" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.692423 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba55533e-c673-440d-a30d-30af0b3a64be" containerName="cinder-api" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.693269 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.695607 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.695859 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.698823 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.719009 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.756189 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.756253 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4bb283dc-d495-4398-a0d1-da97df47ffbd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.756276 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r5k9\" (UniqueName: \"kubernetes.io/projected/4bb283dc-d495-4398-a0d1-da97df47ffbd-kube-api-access-5r5k9\") pod \"cinder-api-0\" (UID: 
\"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.756349 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bb283dc-d495-4398-a0d1-da97df47ffbd-logs\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.756394 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-scripts\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.756453 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-config-data-custom\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.756485 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-public-tls-certs\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.756500 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.756514 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-config-data\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.857923 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-scripts\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858002 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-config-data-custom\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858024 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-public-tls-certs\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858056 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-config-data\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858075 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858111 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858152 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4bb283dc-d495-4398-a0d1-da97df47ffbd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858174 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r5k9\" (UniqueName: \"kubernetes.io/projected/4bb283dc-d495-4398-a0d1-da97df47ffbd-kube-api-access-5r5k9\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858229 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bb283dc-d495-4398-a0d1-da97df47ffbd-logs\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858676 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4bb283dc-d495-4398-a0d1-da97df47ffbd-logs\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.858727 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4bb283dc-d495-4398-a0d1-da97df47ffbd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.862187 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-config-data-custom\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.863071 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.863405 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-scripts\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.869687 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-config-data\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.869995 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-public-tls-certs\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.881033 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4bb283dc-d495-4398-a0d1-da97df47ffbd-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:39 crc kubenswrapper[4631]: I1129 04:31:39.882825 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r5k9\" (UniqueName: \"kubernetes.io/projected/4bb283dc-d495-4398-a0d1-da97df47ffbd-kube-api-access-5r5k9\") pod \"cinder-api-0\" (UID: \"4bb283dc-d495-4398-a0d1-da97df47ffbd\") " pod="openstack/cinder-api-0" Nov 29 04:31:40 crc kubenswrapper[4631]: I1129 04:31:40.008795 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 29 04:31:40 crc kubenswrapper[4631]: I1129 04:31:40.497104 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:40 crc kubenswrapper[4631]: I1129 04:31:40.525399 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 29 04:31:40 crc kubenswrapper[4631]: I1129 04:31:40.681628 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4bb283dc-d495-4398-a0d1-da97df47ffbd","Type":"ContainerStarted","Data":"912020e3af5e18ed5b9ed15d17534dd546bb3e247573a8bc02e5a7c56c55b5c9"} Nov 29 04:31:40 crc kubenswrapper[4631]: I1129 04:31:40.786430 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6c8fbfb7d4-6m5ww" Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.234991 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba55533e-c673-440d-a30d-30af0b3a64be" path="/var/lib/kubelet/pods/ba55533e-c673-440d-a30d-30af0b3a64be/volumes" Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.261564 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-fb967fbcd-pqplm" podUID="f7aced22-8f95-4c19-b6c6-f56a84ae29e0" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.262376 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-fb967fbcd-pqplm" podUID="f7aced22-8f95-4c19-b6c6-f56a84ae29e0" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout 
exceeded while awaiting headers)" Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.262402 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-fb967fbcd-pqplm" podUID="f7aced22-8f95-4c19-b6c6-f56a84ae29e0" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.292077 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-fb967fbcd-pqplm" Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.412562 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-65dc8fcc6b-fns29"] Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.413010 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api-log" containerID="cri-o://56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2" gracePeriod=30 Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.413375 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api" containerID="cri-o://f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9" gracePeriod=30 Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.440258 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": EOF" Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.440267 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": EOF" Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.450565 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": EOF" Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.748642 4631 generic.go:334] "Generic (PLEG): container finished" podID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerID="56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2" exitCode=143 Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.748699 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65dc8fcc6b-fns29" event={"ID":"125ef941-9d17-4b6e-b395-1deac81e80d8","Type":"ContainerDied","Data":"56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2"} Nov 29 04:31:41 crc kubenswrapper[4631]: I1129 04:31:41.759973 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4bb283dc-d495-4398-a0d1-da97df47ffbd","Type":"ContainerStarted","Data":"67afd73419d00f9a3dca58036b7c083bdc64978e53d53b843e55de767843f714"} Nov 29 04:31:42 crc kubenswrapper[4631]: I1129 04:31:42.503701 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6ff47d4689-gnj7t" Nov 29 04:31:42 crc 
Nov 29 04:31:42 crc kubenswrapper[4631]: I1129 04:31:42.772904 4631 generic.go:334] "Generic (PLEG): container finished" podID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerID="8b44c75824934f7b87d14cf3fa1b963da97caaa8b8cf8a2df430157835986df1" exitCode=0
Nov 29 04:31:42 crc kubenswrapper[4631]: I1129 04:31:42.772960 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76fdc69464-qvs2b" event={"ID":"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14","Type":"ContainerDied","Data":"8b44c75824934f7b87d14cf3fa1b963da97caaa8b8cf8a2df430157835986df1"}
Nov 29 04:31:42 crc kubenswrapper[4631]: I1129 04:31:42.774163 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4bb283dc-d495-4398-a0d1-da97df47ffbd","Type":"ContainerStarted","Data":"a61229d1fc532f352dd469d46ed74ccb9c0b469361f9700d485e50eae510ab0c"}
Nov 29 04:31:42 crc kubenswrapper[4631]: I1129 04:31:42.775209 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 29 04:31:42 crc kubenswrapper[4631]: I1129 04:31:42.812768 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.812746367 podStartE2EDuration="3.812746367s" podCreationTimestamp="2025-11-29 04:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:42.799412492 +0000 UTC m=+1229.863916006" watchObservedRunningTime="2025-11-29 04:31:42.812746367 +0000 UTC m=+1229.877249881"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.041540 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.125391 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-2p6j8"]
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.125690 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" podUID="e6972f45-d644-4118-8c37-2ca075a65b12" containerName="dnsmasq-dns" containerID="cri-o://f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef" gracePeriod=10
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.359440 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.415528 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.722946 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.784603 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76fdc69464-qvs2b" event={"ID":"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14","Type":"ContainerStarted","Data":"229305a7e7722d15999a5144c7f2a46b050e7800a04c60576976efcb74cd5340"}
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.787693 4631 generic.go:334] "Generic (PLEG): container finished" podID="e2a6410f-6c69-4b87-a247-b285aef98b71" containerID="cbbb99ecfc0bc493701045852b7269c4bcff1a9e60435681b409c45bbed38032" exitCode=0
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.787774 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5964d597b6-rfcr2" event={"ID":"e2a6410f-6c69-4b87-a247-b285aef98b71","Type":"ContainerDied","Data":"cbbb99ecfc0bc493701045852b7269c4bcff1a9e60435681b409c45bbed38032"}
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.787825 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5964d597b6-rfcr2" event={"ID":"e2a6410f-6c69-4b87-a247-b285aef98b71","Type":"ContainerStarted","Data":"6b0852a471a04f56f6c58d087a251ca3eb79417543ab5c57ef78baa6daf8a698"}
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.793553 4631 generic.go:334] "Generic (PLEG): container finished" podID="e6972f45-d644-4118-8c37-2ca075a65b12" containerID="f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef" exitCode=0
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.793731 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerName="cinder-scheduler" containerID="cri-o://270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348" gracePeriod=30
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.793997 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.794221 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" event={"ID":"e6972f45-d644-4118-8c37-2ca075a65b12","Type":"ContainerDied","Data":"f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef"}
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.794247 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-2p6j8" event={"ID":"e6972f45-d644-4118-8c37-2ca075a65b12","Type":"ContainerDied","Data":"f489eeb6dffc11f675d4dfc792e89c46be7d5236374dce3f8c4ddef3cc40d39a"}
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.794262 4631 scope.go:117] "RemoveContainer" containerID="f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.794797 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerName="probe" containerID="cri-o://4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac" gracePeriod=30
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.830915 4631 scope.go:117] "RemoveContainer" containerID="c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.860301 4631 scope.go:117] "RemoveContainer" containerID="f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef"
Nov 29 04:31:43 crc kubenswrapper[4631]: E1129 04:31:43.862958 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef\": container with ID starting with f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef not found: ID does not exist" containerID="f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.862992 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef"} err="failed to get container status \"f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef\": rpc error: code = NotFound desc = could not find container \"f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef\": container with ID starting with f5bbb7756e122489fb3c10d0ec1ea409da1972c186d7e938883f151fdedf7cef not found: ID does not exist"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.863010 4631 scope.go:117] "RemoveContainer" containerID="c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6"
Nov 29 04:31:43 crc kubenswrapper[4631]: E1129 04:31:43.864296 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6\": container with ID starting with c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6 not found: ID does not exist" containerID="c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.864415 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6"} err="failed to get container status \"c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6\": rpc error: code = NotFound desc = could not find container \"c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6\": container with ID starting with c4ff34a3d5ba65860de70f3f15018fea0598cb31355f64eaf3ed15e856b0a6c6 not found: ID does not exist"
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.869397 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-dns-svc\") pod \"e6972f45-d644-4118-8c37-2ca075a65b12\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") "
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.869516 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-nb\") pod \"e6972f45-d644-4118-8c37-2ca075a65b12\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") "
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.869649 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-config\") pod \"e6972f45-d644-4118-8c37-2ca075a65b12\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") "
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.869737 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-sb\") pod \"e6972f45-d644-4118-8c37-2ca075a65b12\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") "
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.869924 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbcd9\" (UniqueName: \"kubernetes.io/projected/e6972f45-d644-4118-8c37-2ca075a65b12-kube-api-access-wbcd9\") pod \"e6972f45-d644-4118-8c37-2ca075a65b12\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") "
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.894537 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6972f45-d644-4118-8c37-2ca075a65b12-kube-api-access-wbcd9" (OuterVolumeSpecName: "kube-api-access-wbcd9") pod "e6972f45-d644-4118-8c37-2ca075a65b12" (UID: "e6972f45-d644-4118-8c37-2ca075a65b12"). InnerVolumeSpecName "kube-api-access-wbcd9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.971979 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e6972f45-d644-4118-8c37-2ca075a65b12" (UID: "e6972f45-d644-4118-8c37-2ca075a65b12"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.972370 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-nb\") pod \"e6972f45-d644-4118-8c37-2ca075a65b12\" (UID: \"e6972f45-d644-4118-8c37-2ca075a65b12\") "
Nov 29 04:31:43 crc kubenswrapper[4631]: W1129 04:31:43.972706 4631 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/e6972f45-d644-4118-8c37-2ca075a65b12/volumes/kubernetes.io~configmap/ovsdbserver-nb
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.972720 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e6972f45-d644-4118-8c37-2ca075a65b12" (UID: "e6972f45-d644-4118-8c37-2ca075a65b12"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.972977 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wbcd9\" (UniqueName: \"kubernetes.io/projected/e6972f45-d644-4118-8c37-2ca075a65b12-kube-api-access-wbcd9\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.973055 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.973847 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-config" (OuterVolumeSpecName: "config") pod "e6972f45-d644-4118-8c37-2ca075a65b12" (UID: "e6972f45-d644-4118-8c37-2ca075a65b12"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 04:31:43 crc kubenswrapper[4631]: I1129 04:31:43.983626 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e6972f45-d644-4118-8c37-2ca075a65b12" (UID: "e6972f45-d644-4118-8c37-2ca075a65b12"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 04:31:44 crc kubenswrapper[4631]: I1129 04:31:44.005858 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e6972f45-d644-4118-8c37-2ca075a65b12" (UID: "e6972f45-d644-4118-8c37-2ca075a65b12"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 29 04:31:44 crc kubenswrapper[4631]: I1129 04:31:44.074362 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:44 crc kubenswrapper[4631]: I1129 04:31:44.074605 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-config\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:44 crc kubenswrapper[4631]: I1129 04:31:44.074613 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6972f45-d644-4118-8c37-2ca075a65b12-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:44 crc kubenswrapper[4631]: I1129 04:31:44.123769 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-2p6j8"]
Nov 29 04:31:44 crc kubenswrapper[4631]: I1129 04:31:44.130650 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-2p6j8"]
Nov 29 04:31:44 crc kubenswrapper[4631]: I1129 04:31:44.803601 4631 generic.go:334] "Generic (PLEG): container finished" podID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerID="4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac" exitCode=0
Nov 29 04:31:44 crc kubenswrapper[4631]: I1129 04:31:44.803634 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fb9229ec-b7be-4f22-86c1-410644b922ff","Type":"ContainerDied","Data":"4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac"}
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.251983 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6972f45-d644-4118-8c37-2ca075a65b12" path="/var/lib/kubelet/pods/e6972f45-d644-4118-8c37-2ca075a65b12/volumes"
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.472561 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.602002 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data-custom\") pod \"fb9229ec-b7be-4f22-86c1-410644b922ff\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") "
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.602084 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fb9229ec-b7be-4f22-86c1-410644b922ff-etc-machine-id\") pod \"fb9229ec-b7be-4f22-86c1-410644b922ff\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") "
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.602116 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-combined-ca-bundle\") pod \"fb9229ec-b7be-4f22-86c1-410644b922ff\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") "
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.602144 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-scripts\") pod \"fb9229ec-b7be-4f22-86c1-410644b922ff\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") "
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.602180 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cp8pz\" (UniqueName: \"kubernetes.io/projected/fb9229ec-b7be-4f22-86c1-410644b922ff-kube-api-access-cp8pz\") pod \"fb9229ec-b7be-4f22-86c1-410644b922ff\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") "
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.602289 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data\") pod \"fb9229ec-b7be-4f22-86c1-410644b922ff\" (UID: \"fb9229ec-b7be-4f22-86c1-410644b922ff\") "
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.603777 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fb9229ec-b7be-4f22-86c1-410644b922ff-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "fb9229ec-b7be-4f22-86c1-410644b922ff" (UID: "fb9229ec-b7be-4f22-86c1-410644b922ff"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.612950 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "fb9229ec-b7be-4f22-86c1-410644b922ff" (UID: "fb9229ec-b7be-4f22-86c1-410644b922ff"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.618468 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb9229ec-b7be-4f22-86c1-410644b922ff-kube-api-access-cp8pz" (OuterVolumeSpecName: "kube-api-access-cp8pz") pod "fb9229ec-b7be-4f22-86c1-410644b922ff" (UID: "fb9229ec-b7be-4f22-86c1-410644b922ff"). InnerVolumeSpecName "kube-api-access-cp8pz".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.631448 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-scripts" (OuterVolumeSpecName: "scripts") pod "fb9229ec-b7be-4f22-86c1-410644b922ff" (UID: "fb9229ec-b7be-4f22-86c1-410644b922ff"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.704049 4631 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fb9229ec-b7be-4f22-86c1-410644b922ff-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.704074 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.704083 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cp8pz\" (UniqueName: \"kubernetes.io/projected/fb9229ec-b7be-4f22-86c1-410644b922ff-kube-api-access-cp8pz\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.704092 4631 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.708677 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb9229ec-b7be-4f22-86c1-410644b922ff" (UID: "fb9229ec-b7be-4f22-86c1-410644b922ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.715393 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data" (OuterVolumeSpecName: "config-data") pod "fb9229ec-b7be-4f22-86c1-410644b922ff" (UID: "fb9229ec-b7be-4f22-86c1-410644b922ff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.805369 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.805395 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9229ec-b7be-4f22-86c1-410644b922ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.820616 4631 generic.go:334] "Generic (PLEG): container finished" podID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerID="270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348" exitCode=0 Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.820660 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fb9229ec-b7be-4f22-86c1-410644b922ff","Type":"ContainerDied","Data":"270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348"} Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.820689 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"fb9229ec-b7be-4f22-86c1-410644b922ff","Type":"ContainerDied","Data":"4906b243cf87c1be2bb910f0d64d85d54b18f6389f3c37be54542895133ab14a"} Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.820720 4631 scope.go:117] "RemoveContainer" containerID="4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.820879 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.864279 4631 scope.go:117] "RemoveContainer" containerID="270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.870665 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.879290 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.926467 4631 scope.go:117] "RemoveContainer" containerID="4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.926583 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 04:31:45 crc kubenswrapper[4631]: E1129 04:31:45.926965 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerName="probe" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.926980 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerName="probe" Nov 29 04:31:45 crc kubenswrapper[4631]: E1129 04:31:45.927010 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerName="cinder-scheduler" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.927017 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerName="cinder-scheduler" Nov 29 04:31:45 crc kubenswrapper[4631]: E1129 04:31:45.927031 4631 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e6972f45-d644-4118-8c37-2ca075a65b12" containerName="dnsmasq-dns" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.927037 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6972f45-d644-4118-8c37-2ca075a65b12" containerName="dnsmasq-dns" Nov 29 04:31:45 crc kubenswrapper[4631]: E1129 04:31:45.927050 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6972f45-d644-4118-8c37-2ca075a65b12" containerName="init" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.927056 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6972f45-d644-4118-8c37-2ca075a65b12" containerName="init" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.927227 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6972f45-d644-4118-8c37-2ca075a65b12" containerName="dnsmasq-dns" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.927249 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerName="probe" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.927262 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9229ec-b7be-4f22-86c1-410644b922ff" containerName="cinder-scheduler" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.928842 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 04:31:45 crc kubenswrapper[4631]: E1129 04:31:45.929978 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac\": container with ID starting with 4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac not found: ID does not exist" containerID="4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.930018 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac"} err="failed to get container status \"4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac\": rpc error: code = NotFound desc = could not find container \"4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac\": container with ID starting with 4ce476e13a7c9427bf7893cd8c7b95ada167df6216cffe5662bd3a4293372dac not found: ID does not exist" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.930046 4631 scope.go:117] "RemoveContainer" containerID="270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348" Nov 29 04:31:45 crc kubenswrapper[4631]: E1129 04:31:45.933626 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348\": container with ID starting with 270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348 not found: ID does not exist" containerID="270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.933647 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348"} err="failed to get container status \"270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348\": rpc error: code = NotFound desc = could not find container 
\"270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348\": container with ID starting with 270f785192bd41d44baf97676ba63d63f5ac91a283451f67e2e4f987c8a1d348 not found: ID does not exist" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.933712 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 29 04:31:45 crc kubenswrapper[4631]: I1129 04:31:45.937477 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.009913 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-config-data\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.010010 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.010617 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.010660 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5jmw\" (UniqueName: \"kubernetes.io/projected/33fff273-753a-43d7-ad1d-3d8dd9d3f373-kube-api-access-r5jmw\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.010695 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/33fff273-753a-43d7-ad1d-3d8dd9d3f373-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.010744 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-scripts\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.112052 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.112591 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-config-data-custom\") pod \"cinder-scheduler-0\" (UID: 
\"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.112618 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5jmw\" (UniqueName: \"kubernetes.io/projected/33fff273-753a-43d7-ad1d-3d8dd9d3f373-kube-api-access-r5jmw\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.112650 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/33fff273-753a-43d7-ad1d-3d8dd9d3f373-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.112682 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-scripts\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.112763 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-config-data\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.112915 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/33fff273-753a-43d7-ad1d-3d8dd9d3f373-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.129944 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.130290 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5jmw\" (UniqueName: \"kubernetes.io/projected/33fff273-753a-43d7-ad1d-3d8dd9d3f373-kube-api-access-r5jmw\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.130394 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.130752 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-config-data\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.138915 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/33fff273-753a-43d7-ad1d-3d8dd9d3f373-scripts\") pod \"cinder-scheduler-0\" (UID: \"33fff273-753a-43d7-ad1d-3d8dd9d3f373\") " pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.267770 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.688516 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.774170 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.775650 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.777065 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.777322 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-4hjjz" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.782621 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.793476 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.824283 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjck4\" (UniqueName: \"kubernetes.io/projected/367b380e-2a64-4044-b9c0-db854307d5be-kube-api-access-vjck4\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.824376 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config-secret\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.824421 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.824448 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-combined-ca-bundle\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.875358 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"33fff273-753a-43d7-ad1d-3d8dd9d3f373","Type":"ContainerStarted","Data":"fce29040bc92a8d153292e6cc4dcd5f1925d620cfb8cbe85f87a19ff7e6ccf81"} Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.925728 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" 
(UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config-secret\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.925796 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.925830 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-combined-ca-bundle\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.925913 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjck4\" (UniqueName: \"kubernetes.io/projected/367b380e-2a64-4044-b9c0-db854307d5be-kube-api-access-vjck4\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.927109 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.929682 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-combined-ca-bundle\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.944115 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjck4\" (UniqueName: \"kubernetes.io/projected/367b380e-2a64-4044-b9c0-db854307d5be-kube-api-access-vjck4\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:46 crc kubenswrapper[4631]: I1129 04:31:46.945048 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config-secret\") pod \"openstackclient\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.109791 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.226767 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb9229ec-b7be-4f22-86c1-410644b922ff" path="/var/lib/kubelet/pods/fb9229ec-b7be-4f22-86c1-410644b922ff/volumes" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.580958 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.619387 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.660417 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.661847 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: E1129 04:31:47.664295 4631 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 29 04:31:47 crc kubenswrapper[4631]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_367b380e-2a64-4044-b9c0-db854307d5be_0(25dc9ca82181eb11940c83c13e69456f6e4e45779405a96314637465b5b72a8d): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"25dc9ca82181eb11940c83c13e69456f6e4e45779405a96314637465b5b72a8d" Netns:"/var/run/netns/5ce39392-3972-4ad5-9548-76d78cbd748b" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=25dc9ca82181eb11940c83c13e69456f6e4e45779405a96314637465b5b72a8d;K8S_POD_UID=367b380e-2a64-4044-b9c0-db854307d5be" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/367b380e-2a64-4044-b9c0-db854307d5be]: expected pod UID "367b380e-2a64-4044-b9c0-db854307d5be" but got "55accadf-0ac2-4a6e-a640-6b47845f939f" from Kube API Nov 29 04:31:47 crc kubenswrapper[4631]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 29 04:31:47 crc kubenswrapper[4631]: > Nov 29 04:31:47 crc kubenswrapper[4631]: E1129 04:31:47.664373 4631 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 29 04:31:47 crc kubenswrapper[4631]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_367b380e-2a64-4044-b9c0-db854307d5be_0(25dc9ca82181eb11940c83c13e69456f6e4e45779405a96314637465b5b72a8d): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"25dc9ca82181eb11940c83c13e69456f6e4e45779405a96314637465b5b72a8d" Netns:"/var/run/netns/5ce39392-3972-4ad5-9548-76d78cbd748b" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=25dc9ca82181eb11940c83c13e69456f6e4e45779405a96314637465b5b72a8d;K8S_POD_UID=367b380e-2a64-4044-b9c0-db854307d5be" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/367b380e-2a64-4044-b9c0-db854307d5be]: expected pod UID "367b380e-2a64-4044-b9c0-db854307d5be" but got "55accadf-0ac2-4a6e-a640-6b47845f939f" from Kube API Nov 29 04:31:47 crc kubenswrapper[4631]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 29 04:31:47 crc kubenswrapper[4631]: > pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.671944 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.744692 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/55accadf-0ac2-4a6e-a640-6b47845f939f-openstack-config\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.745100 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55accadf-0ac2-4a6e-a640-6b47845f939f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.745164 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v79s7\" (UniqueName: \"kubernetes.io/projected/55accadf-0ac2-4a6e-a640-6b47845f939f-kube-api-access-v79s7\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.745216 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/55accadf-0ac2-4a6e-a640-6b47845f939f-openstack-config-secret\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.846246 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55accadf-0ac2-4a6e-a640-6b47845f939f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.846313 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v79s7\" (UniqueName: \"kubernetes.io/projected/55accadf-0ac2-4a6e-a640-6b47845f939f-kube-api-access-v79s7\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.846361 4631 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/55accadf-0ac2-4a6e-a640-6b47845f939f-openstack-config-secret\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.846408 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/55accadf-0ac2-4a6e-a640-6b47845f939f-openstack-config\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.847276 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/55accadf-0ac2-4a6e-a640-6b47845f939f-openstack-config\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.854770 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/55accadf-0ac2-4a6e-a640-6b47845f939f-openstack-config-secret\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.855018 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55accadf-0ac2-4a6e-a640-6b47845f939f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.863693 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v79s7\" (UniqueName: \"kubernetes.io/projected/55accadf-0ac2-4a6e-a640-6b47845f939f-kube-api-access-v79s7\") pod \"openstackclient\" (UID: \"55accadf-0ac2-4a6e-a640-6b47845f939f\") " pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.880649 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:37296->10.217.0.159:9311: read: connection reset by peer" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.880719 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:37304->10.217.0.159:9311: read: connection reset by peer" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.888819 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.889378 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"33fff273-753a-43d7-ad1d-3d8dd9d3f373","Type":"ContainerStarted","Data":"f1d3850b29d61678e91454797ab02c879d487c41539c2aa3ee8cc74e92773b22"} Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.892769 4631 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="367b380e-2a64-4044-b9c0-db854307d5be" podUID="55accadf-0ac2-4a6e-a640-6b47845f939f" Nov 29 04:31:47 crc kubenswrapper[4631]: I1129 04:31:47.972182 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.040803 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.049730 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config-secret\") pod \"367b380e-2a64-4044-b9c0-db854307d5be\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.049858 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjck4\" (UniqueName: \"kubernetes.io/projected/367b380e-2a64-4044-b9c0-db854307d5be-kube-api-access-vjck4\") pod \"367b380e-2a64-4044-b9c0-db854307d5be\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.050416 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-combined-ca-bundle\") pod \"367b380e-2a64-4044-b9c0-db854307d5be\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.050476 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config\") pod \"367b380e-2a64-4044-b9c0-db854307d5be\" (UID: \"367b380e-2a64-4044-b9c0-db854307d5be\") " Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.051194 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "367b380e-2a64-4044-b9c0-db854307d5be" (UID: "367b380e-2a64-4044-b9c0-db854307d5be"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.054555 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/367b380e-2a64-4044-b9c0-db854307d5be-kube-api-access-vjck4" (OuterVolumeSpecName: "kube-api-access-vjck4") pod "367b380e-2a64-4044-b9c0-db854307d5be" (UID: "367b380e-2a64-4044-b9c0-db854307d5be"). InnerVolumeSpecName "kube-api-access-vjck4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.055508 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "367b380e-2a64-4044-b9c0-db854307d5be" (UID: "367b380e-2a64-4044-b9c0-db854307d5be"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.061466 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "367b380e-2a64-4044-b9c0-db854307d5be" (UID: "367b380e-2a64-4044-b9c0-db854307d5be"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.152467 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.152496 4631 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.152505 4631 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/367b380e-2a64-4044-b9c0-db854307d5be-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.152516 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjck4\" (UniqueName: \"kubernetes.io/projected/367b380e-2a64-4044-b9c0-db854307d5be-kube-api-access-vjck4\") on node \"crc\" DevicePath \"\"" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.526493 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 29 04:31:48 crc kubenswrapper[4631]: W1129 04:31:48.534611 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55accadf_0ac2_4a6e_a640_6b47845f939f.slice/crio-cee06a396164d00891f5910239c56e9d07df55e4ece05f0b89eba5d525834bb7 WatchSource:0}: Error finding container cee06a396164d00891f5910239c56e9d07df55e4ece05f0b89eba5d525834bb7: Status 404 returned error can't find the container with id cee06a396164d00891f5910239c56e9d07df55e4ece05f0b89eba5d525834bb7 Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.861196 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.899590 4631 generic.go:334] "Generic (PLEG): container finished" podID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerID="f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9" exitCode=0 Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.899646 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65dc8fcc6b-fns29" event={"ID":"125ef941-9d17-4b6e-b395-1deac81e80d8","Type":"ContainerDied","Data":"f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9"} Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.899675 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-65dc8fcc6b-fns29" event={"ID":"125ef941-9d17-4b6e-b395-1deac81e80d8","Type":"ContainerDied","Data":"e3750ba542f5cabff1be0c70c2420433449f799a13c7b28203b6387b9d238c64"} Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.899690 4631 scope.go:117] "RemoveContainer" containerID="f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.899770 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-65dc8fcc6b-fns29" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.901154 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"55accadf-0ac2-4a6e-a640-6b47845f939f","Type":"ContainerStarted","Data":"cee06a396164d00891f5910239c56e9d07df55e4ece05f0b89eba5d525834bb7"} Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.903344 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.908004 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"33fff273-753a-43d7-ad1d-3d8dd9d3f373","Type":"ContainerStarted","Data":"8aeb0f25991517f8707f2e5cf83909959551cd44a0a8236f49d1ae21880827b4"} Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.927239 4631 scope.go:117] "RemoveContainer" containerID="56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.944903 4631 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="367b380e-2a64-4044-b9c0-db854307d5be" podUID="55accadf-0ac2-4a6e-a640-6b47845f939f" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.950968 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.950942674 podStartE2EDuration="3.950942674s" podCreationTimestamp="2025-11-29 04:31:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:48.932827541 +0000 UTC m=+1235.997331055" watchObservedRunningTime="2025-11-29 04:31:48.950942674 +0000 UTC m=+1236.015446188" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.957771 4631 scope.go:117] "RemoveContainer" containerID="f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9" Nov 29 04:31:48 crc kubenswrapper[4631]: E1129 04:31:48.958266 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9\": container with ID starting with f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9 not found: ID does not exist" containerID="f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.958323 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9"} err="failed to get container status \"f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9\": rpc error: code = NotFound desc = could not find container \"f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9\": container with ID starting with f42e72f667ee3c45cc2654fd53719e1cc4a191a3bd26104b843149d830f8dfb9 not found: ID does not exist" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.958365 4631 scope.go:117] "RemoveContainer" containerID="56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2" Nov 29 04:31:48 crc kubenswrapper[4631]: E1129 04:31:48.958810 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2\": container with ID starting with 56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2 not found: ID does not exist" containerID="56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.958835 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2"} err="failed to get container status \"56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2\": rpc error: code = NotFound desc = could not find container \"56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2\": container with ID starting with 56329e3804d9dbf69739c15882bbba63b85888c0aa50c994352d09f5b04e21f2 not found: ID does not exist" Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.968687 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-combined-ca-bundle\") pod \"125ef941-9d17-4b6e-b395-1deac81e80d8\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.968836 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wq77x\" (UniqueName: \"kubernetes.io/projected/125ef941-9d17-4b6e-b395-1deac81e80d8-kube-api-access-wq77x\") pod \"125ef941-9d17-4b6e-b395-1deac81e80d8\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.968881 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data-custom\") pod \"125ef941-9d17-4b6e-b395-1deac81e80d8\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.968910 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data\") pod \"125ef941-9d17-4b6e-b395-1deac81e80d8\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") " Nov 29 04:31:48 
crc kubenswrapper[4631]: I1129 04:31:48.969108 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/125ef941-9d17-4b6e-b395-1deac81e80d8-logs\") pod \"125ef941-9d17-4b6e-b395-1deac81e80d8\" (UID: \"125ef941-9d17-4b6e-b395-1deac81e80d8\") "
Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.970783 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/125ef941-9d17-4b6e-b395-1deac81e80d8-logs" (OuterVolumeSpecName: "logs") pod "125ef941-9d17-4b6e-b395-1deac81e80d8" (UID: "125ef941-9d17-4b6e-b395-1deac81e80d8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.973705 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/125ef941-9d17-4b6e-b395-1deac81e80d8-kube-api-access-wq77x" (OuterVolumeSpecName: "kube-api-access-wq77x") pod "125ef941-9d17-4b6e-b395-1deac81e80d8" (UID: "125ef941-9d17-4b6e-b395-1deac81e80d8"). InnerVolumeSpecName "kube-api-access-wq77x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:31:48 crc kubenswrapper[4631]: I1129 04:31:48.988820 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "125ef941-9d17-4b6e-b395-1deac81e80d8" (UID: "125ef941-9d17-4b6e-b395-1deac81e80d8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.009411 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "125ef941-9d17-4b6e-b395-1deac81e80d8" (UID: "125ef941-9d17-4b6e-b395-1deac81e80d8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.052476 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data" (OuterVolumeSpecName: "config-data") pod "125ef941-9d17-4b6e-b395-1deac81e80d8" (UID: "125ef941-9d17-4b6e-b395-1deac81e80d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.070878 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/125ef941-9d17-4b6e-b395-1deac81e80d8-logs\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.071138 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.071149 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wq77x\" (UniqueName: \"kubernetes.io/projected/125ef941-9d17-4b6e-b395-1deac81e80d8-kube-api-access-wq77x\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.071158 4631 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.071167 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/125ef941-9d17-4b6e-b395-1deac81e80d8-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.225615 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="367b380e-2a64-4044-b9c0-db854307d5be" path="/var/lib/kubelet/pods/367b380e-2a64-4044-b9c0-db854307d5be/volumes"
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.233852 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-65dc8fcc6b-fns29"]
Nov 29 04:31:49 crc kubenswrapper[4631]: I1129 04:31:49.243919 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-65dc8fcc6b-fns29"]
Nov 29 04:31:50 crc kubenswrapper[4631]: I1129 04:31:50.715895 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 04:31:50 crc kubenswrapper[4631]: I1129 04:31:50.716175 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.028004 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-76cbc8bc95-pd9d4"]
Nov 29 04:31:51 crc kubenswrapper[4631]: E1129 04:31:51.031257 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api-log"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.031392 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api-log"
Nov 29 04:31:51 crc kubenswrapper[4631]: E1129 04:31:51.031463 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.031525 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.031758 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api-log"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.031833 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.032731 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.037449 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.037557 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.037859 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.087387 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-76cbc8bc95-pd9d4"]
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.103658 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-log-httpd\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.103710 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-config-data\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.103745 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-combined-ca-bundle\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.103774 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwxlj\" (UniqueName: \"kubernetes.io/projected/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-kube-api-access-rwxlj\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.103802 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-internal-tls-certs\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.103850 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-public-tls-certs\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.103921 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-etc-swift\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.103937 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-run-httpd\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.205576 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-log-httpd\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.206078 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-log-httpd\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.206138 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-config-data\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.206885 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-combined-ca-bundle\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.206921 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwxlj\" (UniqueName: \"kubernetes.io/projected/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-kube-api-access-rwxlj\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.206952 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-internal-tls-certs\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.207002 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-public-tls-certs\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.207078 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-etc-swift\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.207098 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-run-httpd\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.207379 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-run-httpd\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.217979 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-public-tls-certs\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.220828 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-internal-tls-certs\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.231888 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwxlj\" (UniqueName: \"kubernetes.io/projected/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-kube-api-access-rwxlj\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.242391 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-config-data\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.243124 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-combined-ca-bundle\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.243308 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dfe18059-91e5-40e0-a1df-f5f56cf4c0d2-etc-swift\") pod \"swift-proxy-76cbc8bc95-pd9d4\" (UID: \"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2\") " pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.258688 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" path="/var/lib/kubelet/pods/125ef941-9d17-4b6e-b395-1deac81e80d8/volumes"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.268982 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.364722 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.839648 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.840142 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="ceilometer-central-agent" containerID="cri-o://7b4a60b5e6c2f5bae26b271a31f1a2187daa529fae4697a12e9f29516f21a7c7" gracePeriod=30
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.840809 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="proxy-httpd" containerID="cri-o://ff386d72ddff2c18deb37636dfd1679efb1f65106c01a1c516266f6bb05e1a31" gracePeriod=30
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.840852 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="sg-core" containerID="cri-o://40f4aaa9a4abd911f787e56f8b1ec402522c4a18baf37b3d5c66bc129c19edcd" gracePeriod=30
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.840896 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="ceilometer-notification-agent" containerID="cri-o://84f1737cad87a2039fb73be53ae3acf724c65031dba2daf4e44fc70238c53ab4" gracePeriod=30
Nov 29 04:31:51 crc kubenswrapper[4631]: I1129 04:31:51.849551 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.087842 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-76cbc8bc95-pd9d4"]
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.948429 4631 generic.go:334] "Generic (PLEG): container finished" podID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerID="ff386d72ddff2c18deb37636dfd1679efb1f65106c01a1c516266f6bb05e1a31" exitCode=0
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.948874 4631 generic.go:334] "Generic (PLEG): container finished" podID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerID="40f4aaa9a4abd911f787e56f8b1ec402522c4a18baf37b3d5c66bc129c19edcd" exitCode=2
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.948881 4631 generic.go:334] "Generic (PLEG): container finished" podID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerID="84f1737cad87a2039fb73be53ae3acf724c65031dba2daf4e44fc70238c53ab4" exitCode=0
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.948887 4631 generic.go:334] "Generic (PLEG): container finished" podID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerID="7b4a60b5e6c2f5bae26b271a31f1a2187daa529fae4697a12e9f29516f21a7c7" exitCode=0
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.948923 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerDied","Data":"ff386d72ddff2c18deb37636dfd1679efb1f65106c01a1c516266f6bb05e1a31"}
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.948948 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerDied","Data":"40f4aaa9a4abd911f787e56f8b1ec402522c4a18baf37b3d5c66bc129c19edcd"}
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.948957 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerDied","Data":"84f1737cad87a2039fb73be53ae3acf724c65031dba2daf4e44fc70238c53ab4"}
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.948965 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerDied","Data":"7b4a60b5e6c2f5bae26b271a31f1a2187daa529fae4697a12e9f29516f21a7c7"}
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.958801 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-76cbc8bc95-pd9d4" event={"ID":"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2","Type":"ContainerStarted","Data":"5ae80a30418da5339dc70c632aa3ada4da4e3584a82e03b92843c7b403f110ba"}
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.958840 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-76cbc8bc95-pd9d4" event={"ID":"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2","Type":"ContainerStarted","Data":"88ab331e72ae6888ccf2d556ba331f6915822393c6ff90c2c8458f6143bfbd16"}
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.958849 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-76cbc8bc95-pd9d4" event={"ID":"dfe18059-91e5-40e0-a1df-f5f56cf4c0d2","Type":"ContainerStarted","Data":"99d8ca30a0c9e6e49e064ed35313ecb7197d96f9672d3156624b324e05cf722f"}
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.960104 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:52 crc kubenswrapper[4631]: I1129 04:31:52.960126 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.130259 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.141919 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-76fdc69464-qvs2b"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.142384 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-76fdc69464-qvs2b"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.144034 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.149661 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-run-httpd\") pod \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") "
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.149718 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-log-httpd\") pod \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") "
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.149739 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-config-data\") pod \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") "
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.149777 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-combined-ca-bundle\") pod \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") "
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.149903 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-sg-core-conf-yaml\") pod \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") "
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.149967 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-scripts\") pod \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") "
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.150014 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xndsh\" (UniqueName: \"kubernetes.io/projected/8bda5459-96f0-4bc8-8db3-31d78e6ae551-kube-api-access-xndsh\") pod \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\" (UID: \"8bda5459-96f0-4bc8-8db3-31d78e6ae551\") "
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.151672 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8bda5459-96f0-4bc8-8db3-31d78e6ae551" (UID: "8bda5459-96f0-4bc8-8db3-31d78e6ae551"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.152171 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8bda5459-96f0-4bc8-8db3-31d78e6ae551" (UID: "8bda5459-96f0-4bc8-8db3-31d78e6ae551"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.166493 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bda5459-96f0-4bc8-8db3-31d78e6ae551-kube-api-access-xndsh" (OuterVolumeSpecName: "kube-api-access-xndsh") pod "8bda5459-96f0-4bc8-8db3-31d78e6ae551" (UID: "8bda5459-96f0-4bc8-8db3-31d78e6ae551"). InnerVolumeSpecName "kube-api-access-xndsh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.166565 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-scripts" (OuterVolumeSpecName: "scripts") pod "8bda5459-96f0-4bc8-8db3-31d78e6ae551" (UID: "8bda5459-96f0-4bc8-8db3-31d78e6ae551"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.176863 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-76cbc8bc95-pd9d4" podStartSLOduration=2.17684565 podStartE2EDuration="2.17684565s" podCreationTimestamp="2025-11-29 04:31:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:31:52.987549484 +0000 UTC m=+1240.052052988" watchObservedRunningTime="2025-11-29 04:31:53.17684565 +0000 UTC m=+1240.241349164"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.184559 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.251754 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.263236 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xndsh\" (UniqueName: \"kubernetes.io/projected/8bda5459-96f0-4bc8-8db3-31d78e6ae551-kube-api-access-xndsh\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.263312 4631 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.263423 4631 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8bda5459-96f0-4bc8-8db3-31d78e6ae551-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.300990 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5964d597b6-rfcr2" podUID="e2a6410f-6c69-4b87-a247-b285aef98b71" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.313920 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5964d597b6-rfcr2"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.313962 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5964d597b6-rfcr2"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.336344 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8bda5459-96f0-4bc8-8db3-31d78e6ae551" (UID: "8bda5459-96f0-4bc8-8db3-31d78e6ae551"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.372142 4631 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.400515 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8bda5459-96f0-4bc8-8db3-31d78e6ae551" (UID: "8bda5459-96f0-4bc8-8db3-31d78e6ae551"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.474270 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.496149 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-config-data" (OuterVolumeSpecName: "config-data") pod "8bda5459-96f0-4bc8-8db3-31d78e6ae551" (UID: "8bda5459-96f0-4bc8-8db3-31d78e6ae551"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.575889 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8bda5459-96f0-4bc8-8db3-31d78e6ae551-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.729830 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.729852 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-65dc8fcc6b-fns29" podUID="125ef941-9d17-4b6e-b395-1deac81e80d8" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.977579 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.978488 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8bda5459-96f0-4bc8-8db3-31d78e6ae551","Type":"ContainerDied","Data":"2586cecc14ed3c45bd906f1fa4e3a4d17f8075e579444038412e69f3aa27409a"}
Nov 29 04:31:53 crc kubenswrapper[4631]: I1129 04:31:53.978534 4631 scope.go:117] "RemoveContainer" containerID="ff386d72ddff2c18deb37636dfd1679efb1f65106c01a1c516266f6bb05e1a31"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.017715 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.033426 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.037954 4631 scope.go:117] "RemoveContainer" containerID="40f4aaa9a4abd911f787e56f8b1ec402522c4a18baf37b3d5c66bc129c19edcd"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.052097 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:31:54 crc kubenswrapper[4631]: E1129 04:31:54.053056 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="sg-core"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.053073 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="sg-core"
Nov 29 04:31:54 crc kubenswrapper[4631]: E1129 04:31:54.053083 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="proxy-httpd"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.053089 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="proxy-httpd"
Nov 29 04:31:54 crc kubenswrapper[4631]: E1129 04:31:54.053126 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="ceilometer-notification-agent"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.053133 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="ceilometer-notification-agent"
Nov 29 04:31:54 crc kubenswrapper[4631]: E1129 04:31:54.053145 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="ceilometer-central-agent"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.053222 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="ceilometer-central-agent"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.053501 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="ceilometer-notification-agent"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.053538 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="sg-core"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.053555 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="ceilometer-central-agent"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.053596 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" containerName="proxy-httpd"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.056270 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.060349 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.061824 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.062829 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.093621 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-log-httpd\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.093675 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.093708 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-config-data\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.093724 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-scripts\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.093840 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4th8\" (UniqueName: \"kubernetes.io/projected/a4cb5e84-b5ce-4047-af12-06bf698ab02b-kube-api-access-l4th8\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.093892 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-run-httpd\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.093923 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.152842 4631 scope.go:117] "RemoveContainer" containerID="84f1737cad87a2039fb73be53ae3acf724c65031dba2daf4e44fc70238c53ab4"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.207212 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.207257 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-config-data\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.207280 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-scripts\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.207360 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4th8\" (UniqueName: \"kubernetes.io/projected/a4cb5e84-b5ce-4047-af12-06bf698ab02b-kube-api-access-l4th8\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.207405 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-run-httpd\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.207430 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.207492 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-log-httpd\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.209847 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-log-httpd\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.210325 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-run-httpd\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.219973 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-scripts\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.225774 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.226840 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-config-data\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.237871 4631 scope.go:117] "RemoveContainer" containerID="7b4a60b5e6c2f5bae26b271a31f1a2187daa529fae4697a12e9f29516f21a7c7"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.240102 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.244030 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4th8\" (UniqueName: \"kubernetes.io/projected/a4cb5e84-b5ce-4047-af12-06bf698ab02b-kube-api-access-l4th8\") pod \"ceilometer-0\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") " pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.427507 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 04:31:54 crc kubenswrapper[4631]: I1129 04:31:54.966983 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:31:54 crc kubenswrapper[4631]: W1129 04:31:54.987817 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4cb5e84_b5ce_4047_af12_06bf698ab02b.slice/crio-c672822c4babbe754f457f763a41148b9b4bc17445f20fe7779c76df0375c066 WatchSource:0}: Error finding container c672822c4babbe754f457f763a41148b9b4bc17445f20fe7779c76df0375c066: Status 404 returned error can't find the container with id c672822c4babbe754f457f763a41148b9b4bc17445f20fe7779c76df0375c066
Nov 29 04:31:55 crc kubenswrapper[4631]: I1129 04:31:55.227378 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bda5459-96f0-4bc8-8db3-31d78e6ae551" path="/var/lib/kubelet/pods/8bda5459-96f0-4bc8-8db3-31d78e6ae551/volumes"
Nov 29 04:31:55 crc kubenswrapper[4631]: I1129 04:31:55.996563 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerStarted","Data":"c672822c4babbe754f457f763a41148b9b4bc17445f20fe7779c76df0375c066"}
Nov 29 04:31:56 crc kubenswrapper[4631]: I1129 04:31:56.636380 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 29 04:31:57 crc kubenswrapper[4631]: I1129 04:31:57.005884 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerStarted","Data":"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93"}
Nov 29 04:31:57 crc kubenswrapper[4631]: I1129 04:31:57.717882 4631 patch_prober.go:28] interesting pod/console-6448654b5-2hh96 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.44:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 29 04:31:57 crc kubenswrapper[4631]: I1129 04:31:57.718417 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-6448654b5-2hh96" podUID="ef81b43e-ebe5-479f-99aa-db66f264f510" containerName="console" probeResult="failure" output="Get \"https://10.217.0.44:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 29 04:32:01 crc kubenswrapper[4631]: I1129 04:32:01.144676 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:32:01 crc kubenswrapper[4631]: I1129 04:32:01.373792 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:32:01 crc kubenswrapper[4631]: I1129 04:32:01.375196 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-76cbc8bc95-pd9d4"
Nov 29 04:32:03 crc kubenswrapper[4631]: I1129 04:32:03.141423 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused"
Nov 29 04:32:03 crc kubenswrapper[4631]: I1129 04:32:03.266115 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5964d597b6-rfcr2" podUID="e2a6410f-6c69-4b87-a247-b285aef98b71" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused"
Nov 29 04:32:07 crc kubenswrapper[4631]: I1129 04:32:07.102203 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerStarted","Data":"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08"}
Nov 29 04:32:07 crc kubenswrapper[4631]: I1129 04:32:07.103966 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerStarted","Data":"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351"}
Nov 29 04:32:07 crc kubenswrapper[4631]: I1129 04:32:07.104500 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"55accadf-0ac2-4a6e-a640-6b47845f939f","Type":"ContainerStarted","Data":"de16fc11e42fd9cb5bcf919d040e31610d440dd3633298a9325a35c71eca7dc7"}
Nov 29 04:32:07 crc kubenswrapper[4631]: I1129 04:32:07.128199 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.647140786 podStartE2EDuration="20.128182881s" podCreationTimestamp="2025-11-29 04:31:47 +0000 UTC" firstStartedPulling="2025-11-29 04:31:48.536540518 +0000 UTC m=+1235.601044032" lastFinishedPulling="2025-11-29 04:32:06.017582613 +0000 UTC m=+1253.082086127" observedRunningTime="2025-11-29 04:32:07.119930439 +0000 UTC m=+1254.184433953" watchObservedRunningTime="2025-11-29 04:32:07.128182881 +0000 UTC m=+1254.192686385"
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.131014 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerStarted","Data":"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714"}
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.131666 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="ceilometer-central-agent" containerID="cri-o://ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93" gracePeriod=30
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.131925 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.132161 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="proxy-httpd" containerID="cri-o://83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714" gracePeriod=30
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.132207 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="sg-core" containerID="cri-o://8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08" gracePeriod=30
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.132238 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="ceilometer-notification-agent" containerID="cri-o://60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351" gracePeriod=30
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.148754 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.013996591 podStartE2EDuration="15.148726816s" podCreationTimestamp="2025-11-29 04:31:54 +0000 UTC" firstStartedPulling="2025-11-29 04:31:54.99049674 +0000 UTC m=+1242.055000244" lastFinishedPulling="2025-11-29 04:32:08.125226955 +0000 UTC m=+1255.189730469" observedRunningTime="2025-11-29 04:32:09.148314796 +0000 UTC m=+1256.212818310" watchObservedRunningTime="2025-11-29 04:32:09.148726816 +0000 UTC m=+1256.213230330"
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.870993 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.988865 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-log-httpd\") pod \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") "
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.988967 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-run-httpd\") pod \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") "
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.989033 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4th8\" (UniqueName: \"kubernetes.io/projected/a4cb5e84-b5ce-4047-af12-06bf698ab02b-kube-api-access-l4th8\") pod \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") "
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.989090 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-scripts\") pod \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") "
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.989155 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-combined-ca-bundle\") pod \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") "
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.989180 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-config-data\") pod \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") "
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.989225 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-sg-core-conf-yaml\") pod \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\" (UID: \"a4cb5e84-b5ce-4047-af12-06bf698ab02b\") "
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.989656 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a4cb5e84-b5ce-4047-af12-06bf698ab02b" (UID: "a4cb5e84-b5ce-4047-af12-06bf698ab02b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.990119 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a4cb5e84-b5ce-4047-af12-06bf698ab02b" (UID: "a4cb5e84-b5ce-4047-af12-06bf698ab02b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 04:32:09 crc kubenswrapper[4631]: I1129 04:32:09.994571 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-scripts" (OuterVolumeSpecName: "scripts") pod "a4cb5e84-b5ce-4047-af12-06bf698ab02b" (UID: "a4cb5e84-b5ce-4047-af12-06bf698ab02b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.000676 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4cb5e84-b5ce-4047-af12-06bf698ab02b-kube-api-access-l4th8" (OuterVolumeSpecName: "kube-api-access-l4th8") pod "a4cb5e84-b5ce-4047-af12-06bf698ab02b" (UID: "a4cb5e84-b5ce-4047-af12-06bf698ab02b"). InnerVolumeSpecName "kube-api-access-l4th8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.028294 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a4cb5e84-b5ce-4047-af12-06bf698ab02b" (UID: "a4cb5e84-b5ce-4047-af12-06bf698ab02b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.070616 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a4cb5e84-b5ce-4047-af12-06bf698ab02b" (UID: "a4cb5e84-b5ce-4047-af12-06bf698ab02b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.091425 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.091465 4631 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.091477 4631 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.091488 4631 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a4cb5e84-b5ce-4047-af12-06bf698ab02b-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.091498 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4th8\" (UniqueName: \"kubernetes.io/projected/a4cb5e84-b5ce-4047-af12-06bf698ab02b-kube-api-access-l4th8\") on node \"crc\" DevicePath \"\""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.091513 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-scripts\") on node \"crc\" DevicePath \"\""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.099490 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-config-data" (OuterVolumeSpecName: "config-data") pod "a4cb5e84-b5ce-4047-af12-06bf698ab02b" (UID: "a4cb5e84-b5ce-4047-af12-06bf698ab02b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.142973 4631 generic.go:334] "Generic (PLEG): container finished" podID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerID="83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714" exitCode=0
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143014 4631 generic.go:334] "Generic (PLEG): container finished" podID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerID="8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08" exitCode=2
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143022 4631 generic.go:334] "Generic (PLEG): container finished" podID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerID="60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351" exitCode=0
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143024 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143043 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerDied","Data":"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714"}
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143071 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerDied","Data":"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08"}
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143081 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerDied","Data":"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351"}
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143090 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerDied","Data":"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93"}
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143106 4631 scope.go:117] "RemoveContainer" containerID="83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143030 4631 generic.go:334] "Generic (PLEG): container finished" podID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerID="ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93" exitCode=0
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.143214 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a4cb5e84-b5ce-4047-af12-06bf698ab02b","Type":"ContainerDied","Data":"c672822c4babbe754f457f763a41148b9b4bc17445f20fe7779c76df0375c066"}
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.160789 4631 scope.go:117] "RemoveContainer" containerID="8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.180196 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.181957 4631 scope.go:117] "RemoveContainer" containerID="60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.188687 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.193131 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4cb5e84-b5ce-4047-af12-06bf698ab02b-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.216618 4631 scope.go:117] "RemoveContainer" containerID="ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.232311 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:32:10 crc kubenswrapper[4631]: E1129 04:32:10.232944 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="proxy-httpd"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.232962 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="proxy-httpd"
Nov 29 04:32:10 crc kubenswrapper[4631]: E1129 04:32:10.232986 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="ceilometer-notification-agent"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.232992 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="ceilometer-notification-agent"
Nov 29 04:32:10 crc kubenswrapper[4631]: E1129 04:32:10.233030 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="sg-core"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.233036 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="sg-core"
Nov 29 04:32:10 crc kubenswrapper[4631]: E1129 04:32:10.233061 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="ceilometer-central-agent"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.233067 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="ceilometer-central-agent"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.233393 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="proxy-httpd"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.233420 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="ceilometer-notification-agent"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.233442 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="sg-core"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.233461 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" containerName="ceilometer-central-agent"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.238387 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.249243 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.265441 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.265665 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.296261 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-run-httpd\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.296479 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.296557 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-log-httpd\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.296638 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-config-data\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.296775 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtcgl\" (UniqueName: \"kubernetes.io/projected/84d7e0f9-cbce-46f7-ba28-205eb1867850-kube-api-access-dtcgl\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.296860 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-scripts\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.296963 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0"
Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.331152 4631 scope.go:117] "RemoveContainer" containerID="83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714"
Nov 29 04:32:10 crc kubenswrapper[4631]: E1129 04:32:10.332203 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container
\"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714\": container with ID starting with 83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714 not found: ID does not exist" containerID="83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.332227 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714"} err="failed to get container status \"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714\": rpc error: code = NotFound desc = could not find container \"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714\": container with ID starting with 83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.332255 4631 scope.go:117] "RemoveContainer" containerID="8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08" Nov 29 04:32:10 crc kubenswrapper[4631]: E1129 04:32:10.332658 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08\": container with ID starting with 8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08 not found: ID does not exist" containerID="8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.332679 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08"} err="failed to get container status \"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08\": rpc error: code = NotFound desc = could not find container \"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08\": container with ID starting with 8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.332692 4631 scope.go:117] "RemoveContainer" containerID="60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351" Nov 29 04:32:10 crc kubenswrapper[4631]: E1129 04:32:10.332903 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351\": container with ID starting with 60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351 not found: ID does not exist" containerID="60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.332920 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351"} err="failed to get container status \"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351\": rpc error: code = NotFound desc = could not find container \"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351\": container with ID starting with 60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.332932 4631 scope.go:117] "RemoveContainer" containerID="ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93" Nov 29 04:32:10 crc 
kubenswrapper[4631]: E1129 04:32:10.333086 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93\": container with ID starting with ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93 not found: ID does not exist" containerID="ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.333104 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93"} err="failed to get container status \"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93\": rpc error: code = NotFound desc = could not find container \"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93\": container with ID starting with ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.333116 4631 scope.go:117] "RemoveContainer" containerID="83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.333260 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714"} err="failed to get container status \"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714\": rpc error: code = NotFound desc = could not find container \"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714\": container with ID starting with 83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.333275 4631 scope.go:117] "RemoveContainer" containerID="8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.333552 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08"} err="failed to get container status \"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08\": rpc error: code = NotFound desc = could not find container \"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08\": container with ID starting with 8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.333568 4631 scope.go:117] "RemoveContainer" containerID="60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.333712 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351"} err="failed to get container status \"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351\": rpc error: code = NotFound desc = could not find container \"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351\": container with ID starting with 60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.333727 4631 scope.go:117] "RemoveContainer" containerID="ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93" Nov 29 04:32:10 crc 
kubenswrapper[4631]: I1129 04:32:10.333861 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93"} err="failed to get container status \"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93\": rpc error: code = NotFound desc = could not find container \"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93\": container with ID starting with ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.333878 4631 scope.go:117] "RemoveContainer" containerID="83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334013 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714"} err="failed to get container status \"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714\": rpc error: code = NotFound desc = could not find container \"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714\": container with ID starting with 83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334027 4631 scope.go:117] "RemoveContainer" containerID="8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334155 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08"} err="failed to get container status \"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08\": rpc error: code = NotFound desc = could not find container \"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08\": container with ID starting with 8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334171 4631 scope.go:117] "RemoveContainer" containerID="60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334312 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351"} err="failed to get container status \"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351\": rpc error: code = NotFound desc = could not find container \"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351\": container with ID starting with 60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334390 4631 scope.go:117] "RemoveContainer" containerID="ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334550 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93"} err="failed to get container status \"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93\": rpc error: code = NotFound desc = could not find container \"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93\": container with ID 
starting with ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334570 4631 scope.go:117] "RemoveContainer" containerID="83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334757 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714"} err="failed to get container status \"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714\": rpc error: code = NotFound desc = could not find container \"83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714\": container with ID starting with 83ecbef51a747de2dbc59dcf1729d6a65d788a330fb36f696c975d24bfd31714 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334772 4631 scope.go:117] "RemoveContainer" containerID="8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334976 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08"} err="failed to get container status \"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08\": rpc error: code = NotFound desc = could not find container \"8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08\": container with ID starting with 8f3940c66dbaf63c17c4118a359e4e1abfc5cf0bd5c9448e697c0e0245be6d08 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.334990 4631 scope.go:117] "RemoveContainer" containerID="60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.335822 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351"} err="failed to get container status \"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351\": rpc error: code = NotFound desc = could not find container \"60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351\": container with ID starting with 60289d7cd41eec273582b9aeb40adff1e356b5a15c9ef23eb1a93c7b48a3f351 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.335842 4631 scope.go:117] "RemoveContainer" containerID="ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.336098 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93"} err="failed to get container status \"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93\": rpc error: code = NotFound desc = could not find container \"ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93\": container with ID starting with ee5b5946934cd27cfc9abf4b65f22a91fc78d2a84ebca5cdf2d93a5de6348e93 not found: ID does not exist" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.398030 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-config-data\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 
04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.398147 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtcgl\" (UniqueName: \"kubernetes.io/projected/84d7e0f9-cbce-46f7-ba28-205eb1867850-kube-api-access-dtcgl\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.398180 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-scripts\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.398306 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.398386 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-run-httpd\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.398412 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.398430 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-log-httpd\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.399193 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-run-httpd\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.399359 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-log-httpd\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.403809 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.407499 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-config-data\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.409415 
4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-scripts\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.412997 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.415259 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtcgl\" (UniqueName: \"kubernetes.io/projected/84d7e0f9-cbce-46f7-ba28-205eb1867850-kube-api-access-dtcgl\") pod \"ceilometer-0\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " pod="openstack/ceilometer-0" Nov 29 04:32:10 crc kubenswrapper[4631]: I1129 04:32:10.632763 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:32:11 crc kubenswrapper[4631]: I1129 04:32:11.226661 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4cb5e84-b5ce-4047-af12-06bf698ab02b" path="/var/lib/kubelet/pods/a4cb5e84-b5ce-4047-af12-06bf698ab02b/volumes" Nov 29 04:32:11 crc kubenswrapper[4631]: I1129 04:32:11.522226 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:12 crc kubenswrapper[4631]: I1129 04:32:12.164322 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerStarted","Data":"1cf9aa3b401bffd5f7ccf5f9a111fdc6ae98a0df390f2bc4b606a5e51897423d"} Nov 29 04:32:13 crc kubenswrapper[4631]: I1129 04:32:13.173761 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerStarted","Data":"109851c2b06aa6405f17ffa5b6c6c860f37b6b1491f8dd602a7dae95ebceef70"} Nov 29 04:32:13 crc kubenswrapper[4631]: I1129 04:32:13.532075 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:13 crc kubenswrapper[4631]: I1129 04:32:13.979400 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-s6nv9"] Nov 29 04:32:13 crc kubenswrapper[4631]: I1129 04:32:13.984201 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.028413 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-s6nv9"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.067535 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8591303e-f15b-46c9-bdef-47d5d4cdde4e-operator-scripts\") pod \"nova-api-db-create-s6nv9\" (UID: \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\") " pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.067618 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzkl8\" (UniqueName: \"kubernetes.io/projected/8591303e-f15b-46c9-bdef-47d5d4cdde4e-kube-api-access-tzkl8\") pod \"nova-api-db-create-s6nv9\" (UID: \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\") " pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.091775 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-b4692"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.092926 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.107424 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-aa70-account-create-update-rwxwf"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.109078 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.111617 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.118534 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-aa70-account-create-update-rwxwf"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.140958 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-b4692"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.169918 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-pv5l9"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.170539 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdv92\" (UniqueName: \"kubernetes.io/projected/c145c685-b279-4265-b673-a3b6e95fcf38-kube-api-access-jdv92\") pod \"nova-api-aa70-account-create-update-rwxwf\" (UID: \"c145c685-b279-4265-b673-a3b6e95fcf38\") " pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.170665 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbppm\" (UniqueName: \"kubernetes.io/projected/5607b995-0a8c-43a8-b5f7-116f11d800a8-kube-api-access-vbppm\") pod \"nova-cell0-db-create-b4692\" (UID: \"5607b995-0a8c-43a8-b5f7-116f11d800a8\") " pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.170769 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/c145c685-b279-4265-b673-a3b6e95fcf38-operator-scripts\") pod \"nova-api-aa70-account-create-update-rwxwf\" (UID: \"c145c685-b279-4265-b673-a3b6e95fcf38\") " pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.170875 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8591303e-f15b-46c9-bdef-47d5d4cdde4e-operator-scripts\") pod \"nova-api-db-create-s6nv9\" (UID: \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\") " pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.171000 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzkl8\" (UniqueName: \"kubernetes.io/projected/8591303e-f15b-46c9-bdef-47d5d4cdde4e-kube-api-access-tzkl8\") pod \"nova-api-db-create-s6nv9\" (UID: \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\") " pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.171078 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5607b995-0a8c-43a8-b5f7-116f11d800a8-operator-scripts\") pod \"nova-cell0-db-create-b4692\" (UID: \"5607b995-0a8c-43a8-b5f7-116f11d800a8\") " pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.171838 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8591303e-f15b-46c9-bdef-47d5d4cdde4e-operator-scripts\") pod \"nova-api-db-create-s6nv9\" (UID: \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\") " pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.178557 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.189201 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzkl8\" (UniqueName: \"kubernetes.io/projected/8591303e-f15b-46c9-bdef-47d5d4cdde4e-kube-api-access-tzkl8\") pod \"nova-api-db-create-s6nv9\" (UID: \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\") " pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.202683 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-pv5l9"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.213601 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerStarted","Data":"375024c9a7f4aff024c3cfc271b48911bb45b3a08c7aae42e148faac4c083382"} Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.276178 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c145c685-b279-4265-b673-a3b6e95fcf38-operator-scripts\") pod \"nova-api-aa70-account-create-update-rwxwf\" (UID: \"c145c685-b279-4265-b673-a3b6e95fcf38\") " pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.276424 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cnpt\" (UniqueName: \"kubernetes.io/projected/55e05641-c7df-46fe-9a80-58539d8980ef-kube-api-access-4cnpt\") pod \"nova-cell1-db-create-pv5l9\" (UID: \"55e05641-c7df-46fe-9a80-58539d8980ef\") " pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.276464 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e05641-c7df-46fe-9a80-58539d8980ef-operator-scripts\") pod \"nova-cell1-db-create-pv5l9\" (UID: \"55e05641-c7df-46fe-9a80-58539d8980ef\") " pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.276528 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5607b995-0a8c-43a8-b5f7-116f11d800a8-operator-scripts\") pod \"nova-cell0-db-create-b4692\" (UID: \"5607b995-0a8c-43a8-b5f7-116f11d800a8\") " pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.276575 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdv92\" (UniqueName: \"kubernetes.io/projected/c145c685-b279-4265-b673-a3b6e95fcf38-kube-api-access-jdv92\") pod \"nova-api-aa70-account-create-update-rwxwf\" (UID: \"c145c685-b279-4265-b673-a3b6e95fcf38\") " pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.276604 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbppm\" (UniqueName: \"kubernetes.io/projected/5607b995-0a8c-43a8-b5f7-116f11d800a8-kube-api-access-vbppm\") pod \"nova-cell0-db-create-b4692\" (UID: \"5607b995-0a8c-43a8-b5f7-116f11d800a8\") " pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.277776 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/5607b995-0a8c-43a8-b5f7-116f11d800a8-operator-scripts\") pod \"nova-cell0-db-create-b4692\" (UID: \"5607b995-0a8c-43a8-b5f7-116f11d800a8\") " pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.277878 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c145c685-b279-4265-b673-a3b6e95fcf38-operator-scripts\") pod \"nova-api-aa70-account-create-update-rwxwf\" (UID: \"c145c685-b279-4265-b673-a3b6e95fcf38\") " pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.281103 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-2d74-account-create-update-r2knb"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.292966 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.304144 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-2d74-account-create-update-r2knb"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.317070 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.322990 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdv92\" (UniqueName: \"kubernetes.io/projected/c145c685-b279-4265-b673-a3b6e95fcf38-kube-api-access-jdv92\") pod \"nova-api-aa70-account-create-update-rwxwf\" (UID: \"c145c685-b279-4265-b673-a3b6e95fcf38\") " pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.323458 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbppm\" (UniqueName: \"kubernetes.io/projected/5607b995-0a8c-43a8-b5f7-116f11d800a8-kube-api-access-vbppm\") pod \"nova-cell0-db-create-b4692\" (UID: \"5607b995-0a8c-43a8-b5f7-116f11d800a8\") " pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.378081 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e05641-c7df-46fe-9a80-58539d8980ef-operator-scripts\") pod \"nova-cell1-db-create-pv5l9\" (UID: \"55e05641-c7df-46fe-9a80-58539d8980ef\") " pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.378378 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvtsv\" (UniqueName: \"kubernetes.io/projected/3e666e44-54c6-4b2d-9181-16f640203eff-kube-api-access-wvtsv\") pod \"nova-cell0-2d74-account-create-update-r2knb\" (UID: \"3e666e44-54c6-4b2d-9181-16f640203eff\") " pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.378510 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e666e44-54c6-4b2d-9181-16f640203eff-operator-scripts\") pod \"nova-cell0-2d74-account-create-update-r2knb\" (UID: \"3e666e44-54c6-4b2d-9181-16f640203eff\") " pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.378683 4631 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-4cnpt\" (UniqueName: \"kubernetes.io/projected/55e05641-c7df-46fe-9a80-58539d8980ef-kube-api-access-4cnpt\") pod \"nova-cell1-db-create-pv5l9\" (UID: \"55e05641-c7df-46fe-9a80-58539d8980ef\") " pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.379609 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e05641-c7df-46fe-9a80-58539d8980ef-operator-scripts\") pod \"nova-cell1-db-create-pv5l9\" (UID: \"55e05641-c7df-46fe-9a80-58539d8980ef\") " pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.394786 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.397295 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cnpt\" (UniqueName: \"kubernetes.io/projected/55e05641-c7df-46fe-9a80-58539d8980ef-kube-api-access-4cnpt\") pod \"nova-cell1-db-create-pv5l9\" (UID: \"55e05641-c7df-46fe-9a80-58539d8980ef\") " pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.439906 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.460076 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-5551-account-create-update-7cf8w"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.461134 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.462570 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.467707 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.480739 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvtsv\" (UniqueName: \"kubernetes.io/projected/3e666e44-54c6-4b2d-9181-16f640203eff-kube-api-access-wvtsv\") pod \"nova-cell0-2d74-account-create-update-r2knb\" (UID: \"3e666e44-54c6-4b2d-9181-16f640203eff\") " pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.480910 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e666e44-54c6-4b2d-9181-16f640203eff-operator-scripts\") pod \"nova-cell0-2d74-account-create-update-r2knb\" (UID: \"3e666e44-54c6-4b2d-9181-16f640203eff\") " pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.481584 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e666e44-54c6-4b2d-9181-16f640203eff-operator-scripts\") pod \"nova-cell0-2d74-account-create-update-r2knb\" (UID: \"3e666e44-54c6-4b2d-9181-16f640203eff\") " pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.492309 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5551-account-create-update-7cf8w"] Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.515956 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvtsv\" (UniqueName: \"kubernetes.io/projected/3e666e44-54c6-4b2d-9181-16f640203eff-kube-api-access-wvtsv\") pod \"nova-cell0-2d74-account-create-update-r2knb\" (UID: \"3e666e44-54c6-4b2d-9181-16f640203eff\") " pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.580886 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.582180 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8x9s\" (UniqueName: \"kubernetes.io/projected/71733be1-1b42-43f0-8154-997d8b5f800f-kube-api-access-h8x9s\") pod \"nova-cell1-5551-account-create-update-7cf8w\" (UID: \"71733be1-1b42-43f0-8154-997d8b5f800f\") " pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.582257 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/71733be1-1b42-43f0-8154-997d8b5f800f-operator-scripts\") pod \"nova-cell1-5551-account-create-update-7cf8w\" (UID: \"71733be1-1b42-43f0-8154-997d8b5f800f\") " pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.691632 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8x9s\" (UniqueName: \"kubernetes.io/projected/71733be1-1b42-43f0-8154-997d8b5f800f-kube-api-access-h8x9s\") pod \"nova-cell1-5551-account-create-update-7cf8w\" (UID: \"71733be1-1b42-43f0-8154-997d8b5f800f\") " pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.691958 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/71733be1-1b42-43f0-8154-997d8b5f800f-operator-scripts\") pod \"nova-cell1-5551-account-create-update-7cf8w\" (UID: \"71733be1-1b42-43f0-8154-997d8b5f800f\") " pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.692961 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/71733be1-1b42-43f0-8154-997d8b5f800f-operator-scripts\") pod \"nova-cell1-5551-account-create-update-7cf8w\" (UID: \"71733be1-1b42-43f0-8154-997d8b5f800f\") " pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.698047 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.733321 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8x9s\" (UniqueName: \"kubernetes.io/projected/71733be1-1b42-43f0-8154-997d8b5f800f-kube-api-access-h8x9s\") pod \"nova-cell1-5551-account-create-update-7cf8w\" (UID: \"71733be1-1b42-43f0-8154-997d8b5f800f\") " pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:14 crc kubenswrapper[4631]: I1129 04:32:14.800111 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.203582 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-aa70-account-create-update-rwxwf"] Nov 29 04:32:15 crc kubenswrapper[4631]: W1129 04:32:15.211772 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc145c685_b279_4265_b673_a3b6e95fcf38.slice/crio-483d827413496f57169fe4b1bbfd25601da46c71de0b36b1e539b586f9c197e3 WatchSource:0}: Error finding container 483d827413496f57169fe4b1bbfd25601da46c71de0b36b1e539b586f9c197e3: Status 404 returned error can't find the container with id 483d827413496f57169fe4b1bbfd25601da46c71de0b36b1e539b586f9c197e3 Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.238949 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-aa70-account-create-update-rwxwf" event={"ID":"c145c685-b279-4265-b673-a3b6e95fcf38","Type":"ContainerStarted","Data":"483d827413496f57169fe4b1bbfd25601da46c71de0b36b1e539b586f9c197e3"} Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.239104 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-s6nv9"] Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.245106 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerStarted","Data":"68d56f0cdf5148f92cca96806afa0d0c5d78bfde69e7985e10b286448009123a"} Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.474509 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-pv5l9"] Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.484872 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-b4692"] Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.506277 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-2d74-account-create-update-r2knb"] Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.679587 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5551-account-create-update-7cf8w"] Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.736474 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.736694 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerName="glance-log" containerID="cri-o://9888df03259e546ad91a21768846ce5b505694ccd1d719cc5de7d804d697b58f" gracePeriod=30 Nov 29 04:32:15 crc kubenswrapper[4631]: I1129 04:32:15.737118 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerName="glance-httpd" containerID="cri-o://6df2a7ecf04d95c354794cb2f4e8b4f282fd1d005dbe3597d2e228968aeb5401" gracePeriod=30 Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.265151 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2d74-account-create-update-r2knb" event={"ID":"3e666e44-54c6-4b2d-9181-16f640203eff","Type":"ContainerStarted","Data":"b33b30a8200e4e8a6ad802cb260e256ac69f6c85bcd74b142d3de83bfa2e6199"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.265482 4631 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2d74-account-create-update-r2knb" event={"ID":"3e666e44-54c6-4b2d-9181-16f640203eff","Type":"ContainerStarted","Data":"48d78a774a3d41c495ae53483e05a9700a6c336cca331a9baf9da94908462abf"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.270398 4631 generic.go:334] "Generic (PLEG): container finished" podID="8591303e-f15b-46c9-bdef-47d5d4cdde4e" containerID="523a9b297b5cf53c00f0e2ae87ca875539ed5c6ce7e3ca78ae7bdce709e2d2b8" exitCode=0 Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.270481 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-s6nv9" event={"ID":"8591303e-f15b-46c9-bdef-47d5d4cdde4e","Type":"ContainerDied","Data":"523a9b297b5cf53c00f0e2ae87ca875539ed5c6ce7e3ca78ae7bdce709e2d2b8"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.270507 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-s6nv9" event={"ID":"8591303e-f15b-46c9-bdef-47d5d4cdde4e","Type":"ContainerStarted","Data":"9d4c774c45b61a381e647d9f7db1c435b93b9c7eb68b154d6ebb0ed5383968d9"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.275898 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-b4692" event={"ID":"5607b995-0a8c-43a8-b5f7-116f11d800a8","Type":"ContainerStarted","Data":"fcb5e613cb6880f4d89e4f7392aebe0b2a0c42956d2ec100965073726021bad2"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.275928 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-b4692" event={"ID":"5607b995-0a8c-43a8-b5f7-116f11d800a8","Type":"ContainerStarted","Data":"dc2aeb06c49f17f05dd0034a5b2345a8eef2a42dd3433a3de8bd7dbdf53ee18f"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.277863 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pv5l9" event={"ID":"55e05641-c7df-46fe-9a80-58539d8980ef","Type":"ContainerStarted","Data":"d46e8493d02ad1167e29938e31d0b1634a0c3d0cb1c153a7aa0094bcab49b8dd"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.277885 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pv5l9" event={"ID":"55e05641-c7df-46fe-9a80-58539d8980ef","Type":"ContainerStarted","Data":"ed73dd56db1d0fc3bf5a4d80676eca846077c08b8b781f98f4329eae5e83068d"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.282874 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-2d74-account-create-update-r2knb" podStartSLOduration=2.282860389 podStartE2EDuration="2.282860389s" podCreationTimestamp="2025-11-29 04:32:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:32:16.28006285 +0000 UTC m=+1263.344566364" watchObservedRunningTime="2025-11-29 04:32:16.282860389 +0000 UTC m=+1263.347363903" Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.292258 4631 generic.go:334] "Generic (PLEG): container finished" podID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerID="9888df03259e546ad91a21768846ce5b505694ccd1d719cc5de7d804d697b58f" exitCode=143 Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.292346 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"caf43f43-1632-4a05-902b-6c25b8dadf71","Type":"ContainerDied","Data":"9888df03259e546ad91a21768846ce5b505694ccd1d719cc5de7d804d697b58f"} Nov 29 
04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.300040 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-b4692" podStartSLOduration=2.300026218 podStartE2EDuration="2.300026218s" podCreationTimestamp="2025-11-29 04:32:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:32:16.292228768 +0000 UTC m=+1263.356732282" watchObservedRunningTime="2025-11-29 04:32:16.300026218 +0000 UTC m=+1263.364529732" Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.300467 4631 generic.go:334] "Generic (PLEG): container finished" podID="c145c685-b279-4265-b673-a3b6e95fcf38" containerID="f4ba32e188f4697a94b502e4e6a9a68b09e9a7c5abaf290d07ad717f6b6c0345" exitCode=0 Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.300615 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-aa70-account-create-update-rwxwf" event={"ID":"c145c685-b279-4265-b673-a3b6e95fcf38","Type":"ContainerDied","Data":"f4ba32e188f4697a94b502e4e6a9a68b09e9a7c5abaf290d07ad717f6b6c0345"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.304878 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5551-account-create-update-7cf8w" event={"ID":"71733be1-1b42-43f0-8154-997d8b5f800f","Type":"ContainerStarted","Data":"86452593e1b36b12ca2c66a4cc29f596766a4e6d152d29c4a7313de5055be64f"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.304929 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5551-account-create-update-7cf8w" event={"ID":"71733be1-1b42-43f0-8154-997d8b5f800f","Type":"ContainerStarted","Data":"91dc9fbf6dc8a7e9e13d0cfd671015a2f823ce4a6a5ee189e4e04ec07b3b453f"} Nov 29 04:32:16 crc kubenswrapper[4631]: I1129 04:32:16.338563 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-5551-account-create-update-7cf8w" podStartSLOduration=2.33854969 podStartE2EDuration="2.33854969s" podCreationTimestamp="2025-11-29 04:32:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:32:16.334683205 +0000 UTC m=+1263.399186719" watchObservedRunningTime="2025-11-29 04:32:16.33854969 +0000 UTC m=+1263.403053204" Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.169885 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.173453 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.326711 4631 generic.go:334] "Generic (PLEG): container finished" podID="71733be1-1b42-43f0-8154-997d8b5f800f" containerID="86452593e1b36b12ca2c66a4cc29f596766a4e6d152d29c4a7313de5055be64f" exitCode=0 Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.326895 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5551-account-create-update-7cf8w" event={"ID":"71733be1-1b42-43f0-8154-997d8b5f800f","Type":"ContainerDied","Data":"86452593e1b36b12ca2c66a4cc29f596766a4e6d152d29c4a7313de5055be64f"} Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.339681 4631 generic.go:334] "Generic (PLEG): container finished" podID="3e666e44-54c6-4b2d-9181-16f640203eff" 
containerID="b33b30a8200e4e8a6ad802cb260e256ac69f6c85bcd74b142d3de83bfa2e6199" exitCode=0 Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.339760 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2d74-account-create-update-r2knb" event={"ID":"3e666e44-54c6-4b2d-9181-16f640203eff","Type":"ContainerDied","Data":"b33b30a8200e4e8a6ad802cb260e256ac69f6c85bcd74b142d3de83bfa2e6199"} Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.350693 4631 generic.go:334] "Generic (PLEG): container finished" podID="5607b995-0a8c-43a8-b5f7-116f11d800a8" containerID="fcb5e613cb6880f4d89e4f7392aebe0b2a0c42956d2ec100965073726021bad2" exitCode=0 Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.350820 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-b4692" event={"ID":"5607b995-0a8c-43a8-b5f7-116f11d800a8","Type":"ContainerDied","Data":"fcb5e613cb6880f4d89e4f7392aebe0b2a0c42956d2ec100965073726021bad2"} Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.362121 4631 generic.go:334] "Generic (PLEG): container finished" podID="55e05641-c7df-46fe-9a80-58539d8980ef" containerID="d46e8493d02ad1167e29938e31d0b1634a0c3d0cb1c153a7aa0094bcab49b8dd" exitCode=0 Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.362208 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pv5l9" event={"ID":"55e05641-c7df-46fe-9a80-58539d8980ef","Type":"ContainerDied","Data":"d46e8493d02ad1167e29938e31d0b1634a0c3d0cb1c153a7aa0094bcab49b8dd"} Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.396072 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerStarted","Data":"16e2736df261c601512ebe0ed328dd85e41e9eab2d0ed4e7ab55734cc2921175"} Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.396229 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="ceilometer-central-agent" containerID="cri-o://109851c2b06aa6405f17ffa5b6c6c860f37b6b1491f8dd602a7dae95ebceef70" gracePeriod=30 Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.396495 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="proxy-httpd" containerID="cri-o://16e2736df261c601512ebe0ed328dd85e41e9eab2d0ed4e7ab55734cc2921175" gracePeriod=30 Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.396558 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="sg-core" containerID="cri-o://68d56f0cdf5148f92cca96806afa0d0c5d78bfde69e7985e10b286448009123a" gracePeriod=30 Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.396592 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="ceilometer-notification-agent" containerID="cri-o://375024c9a7f4aff024c3cfc271b48911bb45b3a08c7aae42e148faac4c083382" gracePeriod=30 Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.396674 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 04:32:17 crc kubenswrapper[4631]: I1129 04:32:17.426951 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ceilometer-0" podStartSLOduration=2.833056817 podStartE2EDuration="7.426935205s" podCreationTimestamp="2025-11-29 04:32:10 +0000 UTC" firstStartedPulling="2025-11-29 04:32:11.535577502 +0000 UTC m=+1258.600081026" lastFinishedPulling="2025-11-29 04:32:16.1294559 +0000 UTC m=+1263.193959414" observedRunningTime="2025-11-29 04:32:17.423006479 +0000 UTC m=+1264.487509993" watchObservedRunningTime="2025-11-29 04:32:17.426935205 +0000 UTC m=+1264.491438719" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.003926 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.012677 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.019003 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.073519 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e05641-c7df-46fe-9a80-58539d8980ef-operator-scripts\") pod \"55e05641-c7df-46fe-9a80-58539d8980ef\" (UID: \"55e05641-c7df-46fe-9a80-58539d8980ef\") " Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.073643 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cnpt\" (UniqueName: \"kubernetes.io/projected/55e05641-c7df-46fe-9a80-58539d8980ef-kube-api-access-4cnpt\") pod \"55e05641-c7df-46fe-9a80-58539d8980ef\" (UID: \"55e05641-c7df-46fe-9a80-58539d8980ef\") " Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.073695 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c145c685-b279-4265-b673-a3b6e95fcf38-operator-scripts\") pod \"c145c685-b279-4265-b673-a3b6e95fcf38\" (UID: \"c145c685-b279-4265-b673-a3b6e95fcf38\") " Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.073744 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdv92\" (UniqueName: \"kubernetes.io/projected/c145c685-b279-4265-b673-a3b6e95fcf38-kube-api-access-jdv92\") pod \"c145c685-b279-4265-b673-a3b6e95fcf38\" (UID: \"c145c685-b279-4265-b673-a3b6e95fcf38\") " Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.073772 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzkl8\" (UniqueName: \"kubernetes.io/projected/8591303e-f15b-46c9-bdef-47d5d4cdde4e-kube-api-access-tzkl8\") pod \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\" (UID: \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\") " Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.073877 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8591303e-f15b-46c9-bdef-47d5d4cdde4e-operator-scripts\") pod \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\" (UID: \"8591303e-f15b-46c9-bdef-47d5d4cdde4e\") " Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.074917 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c145c685-b279-4265-b673-a3b6e95fcf38-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c145c685-b279-4265-b673-a3b6e95fcf38" (UID: 
"c145c685-b279-4265-b673-a3b6e95fcf38"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.075629 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55e05641-c7df-46fe-9a80-58539d8980ef-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "55e05641-c7df-46fe-9a80-58539d8980ef" (UID: "55e05641-c7df-46fe-9a80-58539d8980ef"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.084374 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8591303e-f15b-46c9-bdef-47d5d4cdde4e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8591303e-f15b-46c9-bdef-47d5d4cdde4e" (UID: "8591303e-f15b-46c9-bdef-47d5d4cdde4e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.085669 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8591303e-f15b-46c9-bdef-47d5d4cdde4e-kube-api-access-tzkl8" (OuterVolumeSpecName: "kube-api-access-tzkl8") pod "8591303e-f15b-46c9-bdef-47d5d4cdde4e" (UID: "8591303e-f15b-46c9-bdef-47d5d4cdde4e"). InnerVolumeSpecName "kube-api-access-tzkl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.087580 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55e05641-c7df-46fe-9a80-58539d8980ef-kube-api-access-4cnpt" (OuterVolumeSpecName: "kube-api-access-4cnpt") pod "55e05641-c7df-46fe-9a80-58539d8980ef" (UID: "55e05641-c7df-46fe-9a80-58539d8980ef"). InnerVolumeSpecName "kube-api-access-4cnpt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.106424 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c145c685-b279-4265-b673-a3b6e95fcf38-kube-api-access-jdv92" (OuterVolumeSpecName: "kube-api-access-jdv92") pod "c145c685-b279-4265-b673-a3b6e95fcf38" (UID: "c145c685-b279-4265-b673-a3b6e95fcf38"). InnerVolumeSpecName "kube-api-access-jdv92". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.175181 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cnpt\" (UniqueName: \"kubernetes.io/projected/55e05641-c7df-46fe-9a80-58539d8980ef-kube-api-access-4cnpt\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.175214 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c145c685-b279-4265-b673-a3b6e95fcf38-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.175223 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdv92\" (UniqueName: \"kubernetes.io/projected/c145c685-b279-4265-b673-a3b6e95fcf38-kube-api-access-jdv92\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.175233 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzkl8\" (UniqueName: \"kubernetes.io/projected/8591303e-f15b-46c9-bdef-47d5d4cdde4e-kube-api-access-tzkl8\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.175242 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8591303e-f15b-46c9-bdef-47d5d4cdde4e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.175251 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55e05641-c7df-46fe-9a80-58539d8980ef-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.405642 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-aa70-account-create-update-rwxwf" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.405655 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-aa70-account-create-update-rwxwf" event={"ID":"c145c685-b279-4265-b673-a3b6e95fcf38","Type":"ContainerDied","Data":"483d827413496f57169fe4b1bbfd25601da46c71de0b36b1e539b586f9c197e3"} Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.405688 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="483d827413496f57169fe4b1bbfd25601da46c71de0b36b1e539b586f9c197e3" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.408218 4631 generic.go:334] "Generic (PLEG): container finished" podID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerID="16e2736df261c601512ebe0ed328dd85e41e9eab2d0ed4e7ab55734cc2921175" exitCode=0 Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.408255 4631 generic.go:334] "Generic (PLEG): container finished" podID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerID="68d56f0cdf5148f92cca96806afa0d0c5d78bfde69e7985e10b286448009123a" exitCode=2 Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.408263 4631 generic.go:334] "Generic (PLEG): container finished" podID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerID="375024c9a7f4aff024c3cfc271b48911bb45b3a08c7aae42e148faac4c083382" exitCode=0 Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.408293 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerDied","Data":"16e2736df261c601512ebe0ed328dd85e41e9eab2d0ed4e7ab55734cc2921175"} Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.408356 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerDied","Data":"68d56f0cdf5148f92cca96806afa0d0c5d78bfde69e7985e10b286448009123a"} Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.408368 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerDied","Data":"375024c9a7f4aff024c3cfc271b48911bb45b3a08c7aae42e148faac4c083382"} Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.409629 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-s6nv9" event={"ID":"8591303e-f15b-46c9-bdef-47d5d4cdde4e","Type":"ContainerDied","Data":"9d4c774c45b61a381e647d9f7db1c435b93b9c7eb68b154d6ebb0ed5383968d9"} Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.409668 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d4c774c45b61a381e647d9f7db1c435b93b9c7eb68b154d6ebb0ed5383968d9" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.409637 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-s6nv9" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.410879 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-pv5l9" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.413545 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pv5l9" event={"ID":"55e05641-c7df-46fe-9a80-58539d8980ef","Type":"ContainerDied","Data":"ed73dd56db1d0fc3bf5a4d80676eca846077c08b8b781f98f4329eae5e83068d"} Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.413572 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed73dd56db1d0fc3bf5a4d80676eca846077c08b8b781f98f4329eae5e83068d" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.806652 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.891250 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvtsv\" (UniqueName: \"kubernetes.io/projected/3e666e44-54c6-4b2d-9181-16f640203eff-kube-api-access-wvtsv\") pod \"3e666e44-54c6-4b2d-9181-16f640203eff\" (UID: \"3e666e44-54c6-4b2d-9181-16f640203eff\") " Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.891534 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e666e44-54c6-4b2d-9181-16f640203eff-operator-scripts\") pod \"3e666e44-54c6-4b2d-9181-16f640203eff\" (UID: \"3e666e44-54c6-4b2d-9181-16f640203eff\") " Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.892210 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e666e44-54c6-4b2d-9181-16f640203eff-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3e666e44-54c6-4b2d-9181-16f640203eff" (UID: "3e666e44-54c6-4b2d-9181-16f640203eff"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.899685 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e666e44-54c6-4b2d-9181-16f640203eff-kube-api-access-wvtsv" (OuterVolumeSpecName: "kube-api-access-wvtsv") pod "3e666e44-54c6-4b2d-9181-16f640203eff" (UID: "3e666e44-54c6-4b2d-9181-16f640203eff"). InnerVolumeSpecName "kube-api-access-wvtsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.991374 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.993947 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e666e44-54c6-4b2d-9181-16f640203eff-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.993970 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvtsv\" (UniqueName: \"kubernetes.io/projected/3e666e44-54c6-4b2d-9181-16f640203eff-kube-api-access-wvtsv\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:18 crc kubenswrapper[4631]: I1129 04:32:18.994061 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.095587 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/71733be1-1b42-43f0-8154-997d8b5f800f-operator-scripts\") pod \"71733be1-1b42-43f0-8154-997d8b5f800f\" (UID: \"71733be1-1b42-43f0-8154-997d8b5f800f\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.095740 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8x9s\" (UniqueName: \"kubernetes.io/projected/71733be1-1b42-43f0-8154-997d8b5f800f-kube-api-access-h8x9s\") pod \"71733be1-1b42-43f0-8154-997d8b5f800f\" (UID: \"71733be1-1b42-43f0-8154-997d8b5f800f\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.095802 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5607b995-0a8c-43a8-b5f7-116f11d800a8-operator-scripts\") pod \"5607b995-0a8c-43a8-b5f7-116f11d800a8\" (UID: \"5607b995-0a8c-43a8-b5f7-116f11d800a8\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.095846 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbppm\" (UniqueName: \"kubernetes.io/projected/5607b995-0a8c-43a8-b5f7-116f11d800a8-kube-api-access-vbppm\") pod \"5607b995-0a8c-43a8-b5f7-116f11d800a8\" (UID: \"5607b995-0a8c-43a8-b5f7-116f11d800a8\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.095974 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71733be1-1b42-43f0-8154-997d8b5f800f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "71733be1-1b42-43f0-8154-997d8b5f800f" (UID: "71733be1-1b42-43f0-8154-997d8b5f800f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.096199 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/71733be1-1b42-43f0-8154-997d8b5f800f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.096306 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5607b995-0a8c-43a8-b5f7-116f11d800a8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5607b995-0a8c-43a8-b5f7-116f11d800a8" (UID: "5607b995-0a8c-43a8-b5f7-116f11d800a8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.101373 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5607b995-0a8c-43a8-b5f7-116f11d800a8-kube-api-access-vbppm" (OuterVolumeSpecName: "kube-api-access-vbppm") pod "5607b995-0a8c-43a8-b5f7-116f11d800a8" (UID: "5607b995-0a8c-43a8-b5f7-116f11d800a8"). InnerVolumeSpecName "kube-api-access-vbppm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.105756 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71733be1-1b42-43f0-8154-997d8b5f800f-kube-api-access-h8x9s" (OuterVolumeSpecName: "kube-api-access-h8x9s") pod "71733be1-1b42-43f0-8154-997d8b5f800f" (UID: "71733be1-1b42-43f0-8154-997d8b5f800f"). 
InnerVolumeSpecName "kube-api-access-h8x9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.198186 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8x9s\" (UniqueName: \"kubernetes.io/projected/71733be1-1b42-43f0-8154-997d8b5f800f-kube-api-access-h8x9s\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.198218 4631 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5607b995-0a8c-43a8-b5f7-116f11d800a8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.198228 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbppm\" (UniqueName: \"kubernetes.io/projected/5607b995-0a8c-43a8-b5f7-116f11d800a8-kube-api-access-vbppm\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.431385 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2d74-account-create-update-r2knb" event={"ID":"3e666e44-54c6-4b2d-9181-16f640203eff","Type":"ContainerDied","Data":"48d78a774a3d41c495ae53483e05a9700a6c336cca331a9baf9da94908462abf"} Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.431628 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="48d78a774a3d41c495ae53483e05a9700a6c336cca331a9baf9da94908462abf" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.431424 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2d74-account-create-update-r2knb" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.433494 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-b4692" event={"ID":"5607b995-0a8c-43a8-b5f7-116f11d800a8","Type":"ContainerDied","Data":"dc2aeb06c49f17f05dd0034a5b2345a8eef2a42dd3433a3de8bd7dbdf53ee18f"} Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.433526 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc2aeb06c49f17f05dd0034a5b2345a8eef2a42dd3433a3de8bd7dbdf53ee18f" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.433564 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-b4692" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.435947 4631 generic.go:334] "Generic (PLEG): container finished" podID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerID="6df2a7ecf04d95c354794cb2f4e8b4f282fd1d005dbe3597d2e228968aeb5401" exitCode=0 Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.435987 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"caf43f43-1632-4a05-902b-6c25b8dadf71","Type":"ContainerDied","Data":"6df2a7ecf04d95c354794cb2f4e8b4f282fd1d005dbe3597d2e228968aeb5401"} Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.441260 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5551-account-create-update-7cf8w" event={"ID":"71733be1-1b42-43f0-8154-997d8b5f800f","Type":"ContainerDied","Data":"91dc9fbf6dc8a7e9e13d0cfd671015a2f823ce4a6a5ee189e4e04ec07b3b453f"} Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.441287 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91dc9fbf6dc8a7e9e13d0cfd671015a2f823ce4a6a5ee189e4e04ec07b3b453f" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.441316 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5551-account-create-update-7cf8w" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.682996 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.811657 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbddt\" (UniqueName: \"kubernetes.io/projected/caf43f43-1632-4a05-902b-6c25b8dadf71-kube-api-access-dbddt\") pod \"caf43f43-1632-4a05-902b-6c25b8dadf71\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.811725 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-httpd-run\") pod \"caf43f43-1632-4a05-902b-6c25b8dadf71\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.811769 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"caf43f43-1632-4a05-902b-6c25b8dadf71\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.811858 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-scripts\") pod \"caf43f43-1632-4a05-902b-6c25b8dadf71\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.811903 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-internal-tls-certs\") pod \"caf43f43-1632-4a05-902b-6c25b8dadf71\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.811938 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-combined-ca-bundle\") pod \"caf43f43-1632-4a05-902b-6c25b8dadf71\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.811955 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-config-data\") pod \"caf43f43-1632-4a05-902b-6c25b8dadf71\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.812001 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-logs\") pod \"caf43f43-1632-4a05-902b-6c25b8dadf71\" (UID: \"caf43f43-1632-4a05-902b-6c25b8dadf71\") " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.812867 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-logs" (OuterVolumeSpecName: "logs") pod "caf43f43-1632-4a05-902b-6c25b8dadf71" (UID: "caf43f43-1632-4a05-902b-6c25b8dadf71"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.816355 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "caf43f43-1632-4a05-902b-6c25b8dadf71" (UID: "caf43f43-1632-4a05-902b-6c25b8dadf71"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.825609 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caf43f43-1632-4a05-902b-6c25b8dadf71-kube-api-access-dbddt" (OuterVolumeSpecName: "kube-api-access-dbddt") pod "caf43f43-1632-4a05-902b-6c25b8dadf71" (UID: "caf43f43-1632-4a05-902b-6c25b8dadf71"). InnerVolumeSpecName "kube-api-access-dbddt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.826375 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-scripts" (OuterVolumeSpecName: "scripts") pod "caf43f43-1632-4a05-902b-6c25b8dadf71" (UID: "caf43f43-1632-4a05-902b-6c25b8dadf71"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.839490 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "caf43f43-1632-4a05-902b-6c25b8dadf71" (UID: "caf43f43-1632-4a05-902b-6c25b8dadf71"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.901467 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "caf43f43-1632-4a05-902b-6c25b8dadf71" (UID: "caf43f43-1632-4a05-902b-6c25b8dadf71"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.920901 4631 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.920931 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.920940 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.920952 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.920960 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbddt\" (UniqueName: \"kubernetes.io/projected/caf43f43-1632-4a05-902b-6c25b8dadf71-kube-api-access-dbddt\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.920968 4631 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/caf43f43-1632-4a05-902b-6c25b8dadf71-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:19 crc kubenswrapper[4631]: I1129 04:32:19.924634 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "caf43f43-1632-4a05-902b-6c25b8dadf71" (UID: "caf43f43-1632-4a05-902b-6c25b8dadf71"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.005510 4631 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.010645 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-config-data" (OuterVolumeSpecName: "config-data") pod "caf43f43-1632-4a05-902b-6c25b8dadf71" (UID: "caf43f43-1632-4a05-902b-6c25b8dadf71"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.030951 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.031161 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerName="glance-log" containerID="cri-o://c071365af6db32296f28d6d0e12d7003fcb31fa1330ad7c3d1e9c2898fd530f5" gracePeriod=30 Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.031587 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerName="glance-httpd" containerID="cri-o://935b56bfa811c44bf3fe96f6a495cb964d38fa9d8e37d00b97df9921d4e176df" gracePeriod=30 Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.036981 4631 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.037008 4631 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.037020 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caf43f43-1632-4a05-902b-6c25b8dadf71-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.449687 4631 generic.go:334] "Generic (PLEG): container finished" podID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerID="c071365af6db32296f28d6d0e12d7003fcb31fa1330ad7c3d1e9c2898fd530f5" exitCode=143 Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.449740 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c3760278-593c-4aa1-9ab5-db3403795f2c","Type":"ContainerDied","Data":"c071365af6db32296f28d6d0e12d7003fcb31fa1330ad7c3d1e9c2898fd530f5"} Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.451660 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"caf43f43-1632-4a05-902b-6c25b8dadf71","Type":"ContainerDied","Data":"611dc4efbf211a3fffdf73bec7870136b9485656cd01d0e5ac3e754c2f797ab7"} Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.451689 4631 scope.go:117] "RemoveContainer" containerID="6df2a7ecf04d95c354794cb2f4e8b4f282fd1d005dbe3597d2e228968aeb5401" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.451728 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.473523 4631 scope.go:117] "RemoveContainer" containerID="9888df03259e546ad91a21768846ce5b505694ccd1d719cc5de7d804d697b58f" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.496762 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.507610 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.512828 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:32:20 crc kubenswrapper[4631]: E1129 04:32:20.513211 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e666e44-54c6-4b2d-9181-16f640203eff" containerName="mariadb-account-create-update" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513231 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e666e44-54c6-4b2d-9181-16f640203eff" containerName="mariadb-account-create-update" Nov 29 04:32:20 crc kubenswrapper[4631]: E1129 04:32:20.513248 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71733be1-1b42-43f0-8154-997d8b5f800f" containerName="mariadb-account-create-update" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513256 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="71733be1-1b42-43f0-8154-997d8b5f800f" containerName="mariadb-account-create-update" Nov 29 04:32:20 crc kubenswrapper[4631]: E1129 04:32:20.513268 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5607b995-0a8c-43a8-b5f7-116f11d800a8" containerName="mariadb-database-create" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513274 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="5607b995-0a8c-43a8-b5f7-116f11d800a8" containerName="mariadb-database-create" Nov 29 04:32:20 crc kubenswrapper[4631]: E1129 04:32:20.513282 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerName="glance-httpd" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513287 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerName="glance-httpd" Nov 29 04:32:20 crc kubenswrapper[4631]: E1129 04:32:20.513296 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8591303e-f15b-46c9-bdef-47d5d4cdde4e" containerName="mariadb-database-create" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513302 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8591303e-f15b-46c9-bdef-47d5d4cdde4e" containerName="mariadb-database-create" Nov 29 04:32:20 crc kubenswrapper[4631]: E1129 04:32:20.513318 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c145c685-b279-4265-b673-a3b6e95fcf38" containerName="mariadb-account-create-update" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513508 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="c145c685-b279-4265-b673-a3b6e95fcf38" containerName="mariadb-account-create-update" Nov 29 04:32:20 crc kubenswrapper[4631]: E1129 04:32:20.513522 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55e05641-c7df-46fe-9a80-58539d8980ef" containerName="mariadb-database-create" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513528 4631 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="55e05641-c7df-46fe-9a80-58539d8980ef" containerName="mariadb-database-create" Nov 29 04:32:20 crc kubenswrapper[4631]: E1129 04:32:20.513538 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerName="glance-log" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513544 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerName="glance-log" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513716 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerName="glance-httpd" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513728 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e666e44-54c6-4b2d-9181-16f640203eff" containerName="mariadb-account-create-update" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513736 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="c145c685-b279-4265-b673-a3b6e95fcf38" containerName="mariadb-account-create-update" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513748 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="5607b995-0a8c-43a8-b5f7-116f11d800a8" containerName="mariadb-database-create" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513761 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8591303e-f15b-46c9-bdef-47d5d4cdde4e" containerName="mariadb-database-create" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513776 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="caf43f43-1632-4a05-902b-6c25b8dadf71" containerName="glance-log" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513783 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="71733be1-1b42-43f0-8154-997d8b5f800f" containerName="mariadb-account-create-update" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.513794 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="55e05641-c7df-46fe-9a80-58539d8980ef" containerName="mariadb-database-create" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.514706 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.519603 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.519778 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.525649 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.610408 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.639755 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5964d597b6-rfcr2" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.645676 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a356aef9-8e14-4c39-92b9-d32402e357ad-logs\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.645741 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.645784 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.645801 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a356aef9-8e14-4c39-92b9-d32402e357ad-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.645828 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.645859 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmhg2\" (UniqueName: \"kubernetes.io/projected/a356aef9-8e14-4c39-92b9-d32402e357ad-kube-api-access-nmhg2\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.645878 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.645920 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.710238 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-76fdc69464-qvs2b"] Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.716349 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.716399 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.716439 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.717126 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b907ff8791c5156baa82f06284d01d372fbfcb2495bb80ab099417356b8d8104"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.717168 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://b907ff8791c5156baa82f06284d01d372fbfcb2495bb80ab099417356b8d8104" gracePeriod=600 Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.747523 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.747590 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.747676 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/a356aef9-8e14-4c39-92b9-d32402e357ad-logs\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.747746 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.747793 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.747808 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a356aef9-8e14-4c39-92b9-d32402e357ad-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.747831 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.747884 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmhg2\" (UniqueName: \"kubernetes.io/projected/a356aef9-8e14-4c39-92b9-d32402e357ad-kube-api-access-nmhg2\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.748768 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a356aef9-8e14-4c39-92b9-d32402e357ad-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.750296 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.755538 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.760564 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a356aef9-8e14-4c39-92b9-d32402e357ad-logs\") pod \"glance-default-internal-api-0\" 
(UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.761345 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.761349 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.762469 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a356aef9-8e14-4c39-92b9-d32402e357ad-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.770454 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmhg2\" (UniqueName: \"kubernetes.io/projected/a356aef9-8e14-4c39-92b9-d32402e357ad-kube-api-access-nmhg2\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.791394 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"a356aef9-8e14-4c39-92b9-d32402e357ad\") " pod="openstack/glance-default-internal-api-0" Nov 29 04:32:20 crc kubenswrapper[4631]: I1129 04:32:20.832314 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:21 crc kubenswrapper[4631]: I1129 04:32:21.234496 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="caf43f43-1632-4a05-902b-6c25b8dadf71" path="/var/lib/kubelet/pods/caf43f43-1632-4a05-902b-6c25b8dadf71/volumes" Nov 29 04:32:21 crc kubenswrapper[4631]: I1129 04:32:21.464285 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 29 04:32:21 crc kubenswrapper[4631]: I1129 04:32:21.467295 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="b907ff8791c5156baa82f06284d01d372fbfcb2495bb80ab099417356b8d8104" exitCode=0 Nov 29 04:32:21 crc kubenswrapper[4631]: I1129 04:32:21.467419 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"b907ff8791c5156baa82f06284d01d372fbfcb2495bb80ab099417356b8d8104"} Nov 29 04:32:21 crc kubenswrapper[4631]: I1129 04:32:21.467472 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"9d1f5550d839094fbb4da7f02f707158ef23902852d0f7dc1490b7a66c7dc987"} Nov 29 04:32:21 crc kubenswrapper[4631]: I1129 04:32:21.467493 4631 scope.go:117] "RemoveContainer" containerID="0d61c7b70ecd9c7737b4b7e588d56ad7e8044dda6cfe17bbe23a704a996d9bc8" Nov 29 04:32:21 crc kubenswrapper[4631]: I1129 04:32:21.467491 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon-log" containerID="cri-o://b8821366a8d7bbbc829496fae08b6db06349f21d4ce52f873b767b5bf290f050" gracePeriod=30 Nov 29 04:32:21 crc kubenswrapper[4631]: I1129 04:32:21.467709 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" containerID="cri-o://229305a7e7722d15999a5144c7f2a46b050e7800a04c60576976efcb74cd5340" gracePeriod=30 Nov 29 04:32:21 crc kubenswrapper[4631]: W1129 04:32:21.470288 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda356aef9_8e14_4c39_92b9_d32402e357ad.slice/crio-e8bea5c1508a4af940fcb4ca04727c29a0ddb3f3a7541e1ed64848073eb43f70 WatchSource:0}: Error finding container e8bea5c1508a4af940fcb4ca04727c29a0ddb3f3a7541e1ed64848073eb43f70: Status 404 returned error can't find the container with id e8bea5c1508a4af940fcb4ca04727c29a0ddb3f3a7541e1ed64848073eb43f70 Nov 29 04:32:22 crc kubenswrapper[4631]: I1129 04:32:22.481864 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a356aef9-8e14-4c39-92b9-d32402e357ad","Type":"ContainerStarted","Data":"58db10a46fb558db892a0b4585e5dc228b1b60846b616bd6a849da655fe0c2c4"} Nov 29 04:32:22 crc kubenswrapper[4631]: I1129 04:32:22.482148 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a356aef9-8e14-4c39-92b9-d32402e357ad","Type":"ContainerStarted","Data":"e8bea5c1508a4af940fcb4ca04727c29a0ddb3f3a7541e1ed64848073eb43f70"} Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.503458 4631 
generic.go:334] "Generic (PLEG): container finished" podID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerID="935b56bfa811c44bf3fe96f6a495cb964d38fa9d8e37d00b97df9921d4e176df" exitCode=0 Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.503581 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c3760278-593c-4aa1-9ab5-db3403795f2c","Type":"ContainerDied","Data":"935b56bfa811c44bf3fe96f6a495cb964d38fa9d8e37d00b97df9921d4e176df"} Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.525054 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a356aef9-8e14-4c39-92b9-d32402e357ad","Type":"ContainerStarted","Data":"57a43b05ef5539336106eb281ed94d7c84c2ce096b29fdf4871e305f84f8920f"} Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.560619 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.560600981 podStartE2EDuration="3.560600981s" podCreationTimestamp="2025-11-29 04:32:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:32:23.549370756 +0000 UTC m=+1270.613874270" watchObservedRunningTime="2025-11-29 04:32:23.560600981 +0000 UTC m=+1270.625104495" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.680513 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.798565 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-config-data\") pod \"c3760278-593c-4aa1-9ab5-db3403795f2c\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.798725 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-combined-ca-bundle\") pod \"c3760278-593c-4aa1-9ab5-db3403795f2c\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.798831 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-scripts\") pod \"c3760278-593c-4aa1-9ab5-db3403795f2c\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.798940 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-public-tls-certs\") pod \"c3760278-593c-4aa1-9ab5-db3403795f2c\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.799008 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-httpd-run\") pod \"c3760278-593c-4aa1-9ab5-db3403795f2c\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.799099 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-logs\") pod \"c3760278-593c-4aa1-9ab5-db3403795f2c\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.799177 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"c3760278-593c-4aa1-9ab5-db3403795f2c\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.799291 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctsth\" (UniqueName: \"kubernetes.io/projected/c3760278-593c-4aa1-9ab5-db3403795f2c-kube-api-access-ctsth\") pod \"c3760278-593c-4aa1-9ab5-db3403795f2c\" (UID: \"c3760278-593c-4aa1-9ab5-db3403795f2c\") " Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.801039 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-logs" (OuterVolumeSpecName: "logs") pod "c3760278-593c-4aa1-9ab5-db3403795f2c" (UID: "c3760278-593c-4aa1-9ab5-db3403795f2c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.801195 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c3760278-593c-4aa1-9ab5-db3403795f2c" (UID: "c3760278-593c-4aa1-9ab5-db3403795f2c"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.806645 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "c3760278-593c-4aa1-9ab5-db3403795f2c" (UID: "c3760278-593c-4aa1-9ab5-db3403795f2c"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.806718 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3760278-593c-4aa1-9ab5-db3403795f2c-kube-api-access-ctsth" (OuterVolumeSpecName: "kube-api-access-ctsth") pod "c3760278-593c-4aa1-9ab5-db3403795f2c" (UID: "c3760278-593c-4aa1-9ab5-db3403795f2c"). InnerVolumeSpecName "kube-api-access-ctsth". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.810441 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-scripts" (OuterVolumeSpecName: "scripts") pod "c3760278-593c-4aa1-9ab5-db3403795f2c" (UID: "c3760278-593c-4aa1-9ab5-db3403795f2c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.864417 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-config-data" (OuterVolumeSpecName: "config-data") pod "c3760278-593c-4aa1-9ab5-db3403795f2c" (UID: "c3760278-593c-4aa1-9ab5-db3403795f2c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.884968 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3760278-593c-4aa1-9ab5-db3403795f2c" (UID: "c3760278-593c-4aa1-9ab5-db3403795f2c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.901727 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.901754 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.901765 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.901773 4631 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.901781 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3760278-593c-4aa1-9ab5-db3403795f2c-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.901804 4631 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.901814 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctsth\" (UniqueName: \"kubernetes.io/projected/c3760278-593c-4aa1-9ab5-db3403795f2c-kube-api-access-ctsth\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.930184 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c3760278-593c-4aa1-9ab5-db3403795f2c" (UID: "c3760278-593c-4aa1-9ab5-db3403795f2c"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:23 crc kubenswrapper[4631]: I1129 04:32:23.931077 4631 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.002831 4631 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3760278-593c-4aa1-9ab5-db3403795f2c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.002851 4631 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:24 crc kubenswrapper[4631]: E1129 04:32:24.510927 4631 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84d7e0f9_cbce_46f7_ba28_205eb1867850.slice/crio-conmon-109851c2b06aa6405f17ffa5b6c6c860f37b6b1491f8dd602a7dae95ebceef70.scope\": RecentStats: unable to find data in memory cache]" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.551204 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c3760278-593c-4aa1-9ab5-db3403795f2c","Type":"ContainerDied","Data":"81d177e0dfcdca12fa3773440103f534cd897eefecc759ad032d3937b9e852b9"} Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.551253 4631 scope.go:117] "RemoveContainer" containerID="935b56bfa811c44bf3fe96f6a495cb964d38fa9d8e37d00b97df9921d4e176df" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.552010 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.566406 4631 generic.go:334] "Generic (PLEG): container finished" podID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerID="109851c2b06aa6405f17ffa5b6c6c860f37b6b1491f8dd602a7dae95ebceef70" exitCode=0 Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.567547 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerDied","Data":"109851c2b06aa6405f17ffa5b6c6c860f37b6b1491f8dd602a7dae95ebceef70"} Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.597615 4631 scope.go:117] "RemoveContainer" containerID="c071365af6db32296f28d6d0e12d7003fcb31fa1330ad7c3d1e9c2898fd530f5" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.603973 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cb52v"] Nov 29 04:32:24 crc kubenswrapper[4631]: E1129 04:32:24.604381 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerName="glance-log" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.604392 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerName="glance-log" Nov 29 04:32:24 crc kubenswrapper[4631]: E1129 04:32:24.604411 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerName="glance-httpd" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.604418 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerName="glance-httpd" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.604588 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerName="glance-httpd" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.604617 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3760278-593c-4aa1-9ab5-db3403795f2c" containerName="glance-log" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.605159 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.608814 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-m2b45" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.609016 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.609271 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.623580 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.671632 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:50750->10.217.0.143:8443: read: connection reset by peer" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.717253 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.726710 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-scripts\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.726979 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nprs5\" (UniqueName: \"kubernetes.io/projected/e4a6498b-07c7-4d19-b5a3-49773fa023a7-kube-api-access-nprs5\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.727068 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.727116 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-config-data\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.781564 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cb52v"] Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.808597 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.811433 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.818989 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.820792 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.832352 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nprs5\" (UniqueName: \"kubernetes.io/projected/e4a6498b-07c7-4d19-b5a3-49773fa023a7-kube-api-access-nprs5\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.832647 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.832683 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-config-data\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.832713 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-scripts\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.834898 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.843084 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-config-data\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.866846 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-scripts\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.890628 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.912993 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nprs5\" (UniqueName: 
\"kubernetes.io/projected/e4a6498b-07c7-4d19-b5a3-49773fa023a7-kube-api-access-nprs5\") pod \"nova-cell0-conductor-db-sync-cb52v\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.939779 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.940304 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b904c4b-913c-4fc2-8037-94300918d367-logs\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.940371 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-scripts\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.940420 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.940512 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-config-data\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.940532 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0b904c4b-913c-4fc2-8037-94300918d367-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.940552 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.940571 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:24 crc kubenswrapper[4631]: I1129 04:32:24.940597 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c2pc\" (UniqueName: \"kubernetes.io/projected/0b904c4b-913c-4fc2-8037-94300918d367-kube-api-access-2c2pc\") pod \"glance-default-external-api-0\" (UID: 
\"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.028876 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.041814 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-config-data\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.042039 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0b904c4b-913c-4fc2-8037-94300918d367-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.042125 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.042241 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.042359 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c2pc\" (UniqueName: \"kubernetes.io/projected/0b904c4b-913c-4fc2-8037-94300918d367-kube-api-access-2c2pc\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.042474 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b904c4b-913c-4fc2-8037-94300918d367-logs\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.042575 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-scripts\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.042691 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.047751 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/0b904c4b-913c-4fc2-8037-94300918d367-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.048324 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.048531 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b904c4b-913c-4fc2-8037-94300918d367-logs\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.048906 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.050863 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.051110 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-scripts\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.059272 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b904c4b-913c-4fc2-8037-94300918d367-config-data\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.074950 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c2pc\" (UniqueName: \"kubernetes.io/projected/0b904c4b-913c-4fc2-8037-94300918d367-kube-api-access-2c2pc\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.129420 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"0b904c4b-913c-4fc2-8037-94300918d367\") " pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.144491 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-sg-core-conf-yaml\") pod \"84d7e0f9-cbce-46f7-ba28-205eb1867850\" 
(UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.144569 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-config-data\") pod \"84d7e0f9-cbce-46f7-ba28-205eb1867850\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.144606 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-log-httpd\") pod \"84d7e0f9-cbce-46f7-ba28-205eb1867850\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.144698 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtcgl\" (UniqueName: \"kubernetes.io/projected/84d7e0f9-cbce-46f7-ba28-205eb1867850-kube-api-access-dtcgl\") pod \"84d7e0f9-cbce-46f7-ba28-205eb1867850\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.144733 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-scripts\") pod \"84d7e0f9-cbce-46f7-ba28-205eb1867850\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.144805 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-run-httpd\") pod \"84d7e0f9-cbce-46f7-ba28-205eb1867850\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.144867 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-combined-ca-bundle\") pod \"84d7e0f9-cbce-46f7-ba28-205eb1867850\" (UID: \"84d7e0f9-cbce-46f7-ba28-205eb1867850\") " Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.146763 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "84d7e0f9-cbce-46f7-ba28-205eb1867850" (UID: "84d7e0f9-cbce-46f7-ba28-205eb1867850"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.152831 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "84d7e0f9-cbce-46f7-ba28-205eb1867850" (UID: "84d7e0f9-cbce-46f7-ba28-205eb1867850"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.161739 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-scripts" (OuterVolumeSpecName: "scripts") pod "84d7e0f9-cbce-46f7-ba28-205eb1867850" (UID: "84d7e0f9-cbce-46f7-ba28-205eb1867850"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.166775 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84d7e0f9-cbce-46f7-ba28-205eb1867850-kube-api-access-dtcgl" (OuterVolumeSpecName: "kube-api-access-dtcgl") pod "84d7e0f9-cbce-46f7-ba28-205eb1867850" (UID: "84d7e0f9-cbce-46f7-ba28-205eb1867850"). InnerVolumeSpecName "kube-api-access-dtcgl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.170861 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.248256 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3760278-593c-4aa1-9ab5-db3403795f2c" path="/var/lib/kubelet/pods/c3760278-593c-4aa1-9ab5-db3403795f2c/volumes" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.250148 4631 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.250171 4631 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84d7e0f9-cbce-46f7-ba28-205eb1867850-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.250181 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtcgl\" (UniqueName: \"kubernetes.io/projected/84d7e0f9-cbce-46f7-ba28-205eb1867850-kube-api-access-dtcgl\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.250190 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.294439 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "84d7e0f9-cbce-46f7-ba28-205eb1867850" (UID: "84d7e0f9-cbce-46f7-ba28-205eb1867850"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.301144 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "84d7e0f9-cbce-46f7-ba28-205eb1867850" (UID: "84d7e0f9-cbce-46f7-ba28-205eb1867850"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.324445 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-config-data" (OuterVolumeSpecName: "config-data") pod "84d7e0f9-cbce-46f7-ba28-205eb1867850" (UID: "84d7e0f9-cbce-46f7-ba28-205eb1867850"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.354110 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.354136 4631 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.354160 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84d7e0f9-cbce-46f7-ba28-205eb1867850-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.536927 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cb52v"] Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.607308 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-cb52v" event={"ID":"e4a6498b-07c7-4d19-b5a3-49773fa023a7","Type":"ContainerStarted","Data":"3ddc5edb6f73cb73f4a514cb2ba9f89e2cac940376430c0df430d4a523eaab06"} Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.609206 4631 generic.go:334] "Generic (PLEG): container finished" podID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerID="229305a7e7722d15999a5144c7f2a46b050e7800a04c60576976efcb74cd5340" exitCode=0 Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.609243 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76fdc69464-qvs2b" event={"ID":"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14","Type":"ContainerDied","Data":"229305a7e7722d15999a5144c7f2a46b050e7800a04c60576976efcb74cd5340"} Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.609266 4631 scope.go:117] "RemoveContainer" containerID="8b44c75824934f7b87d14cf3fa1b963da97caaa8b8cf8a2df430157835986df1" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.611573 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84d7e0f9-cbce-46f7-ba28-205eb1867850","Type":"ContainerDied","Data":"1cf9aa3b401bffd5f7ccf5f9a111fdc6ae98a0df390f2bc4b606a5e51897423d"} Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.611706 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.652222 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.670870 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.681167 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:25 crc kubenswrapper[4631]: E1129 04:32:25.681668 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="sg-core" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.681731 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="sg-core" Nov 29 04:32:25 crc kubenswrapper[4631]: E1129 04:32:25.681787 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="ceilometer-central-agent" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.681835 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="ceilometer-central-agent" Nov 29 04:32:25 crc kubenswrapper[4631]: E1129 04:32:25.691799 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="proxy-httpd" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.692019 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="proxy-httpd" Nov 29 04:32:25 crc kubenswrapper[4631]: E1129 04:32:25.692093 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="ceilometer-notification-agent" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.692351 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="ceilometer-notification-agent" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.692707 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="ceilometer-central-agent" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.692805 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="sg-core" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.692868 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="ceilometer-notification-agent" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.692927 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" containerName="proxy-httpd" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.709864 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.710121 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.713072 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.713220 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.867497 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-run-httpd\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.867736 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.867816 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-log-httpd\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.867883 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6g6s5\" (UniqueName: \"kubernetes.io/projected/34148105-3978-44c8-a28f-ac93b2e4ed06-kube-api-access-6g6s5\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.868030 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.868135 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-scripts\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.868205 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-config-data\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.868352 4631 scope.go:117] "RemoveContainer" containerID="16e2736df261c601512ebe0ed328dd85e41e9eab2d0ed4e7ab55734cc2921175" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.878695 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.940422 4631 scope.go:117] "RemoveContainer" containerID="68d56f0cdf5148f92cca96806afa0d0c5d78bfde69e7985e10b286448009123a" Nov 29 04:32:25 crc 
kubenswrapper[4631]: I1129 04:32:25.969889 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-scripts\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.969934 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-config-data\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.970021 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-run-httpd\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.970045 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.970069 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-log-httpd\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.970089 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6g6s5\" (UniqueName: \"kubernetes.io/projected/34148105-3978-44c8-a28f-ac93b2e4ed06-kube-api-access-6g6s5\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.970145 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.970826 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-log-httpd\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.970953 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-run-httpd\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.976086 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-config-data\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.978853 4631 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.983495 4631 scope.go:117] "RemoveContainer" containerID="375024c9a7f4aff024c3cfc271b48911bb45b3a08c7aae42e148faac4c083382" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.983893 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.984474 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-scripts\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:25 crc kubenswrapper[4631]: I1129 04:32:25.990426 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6g6s5\" (UniqueName: \"kubernetes.io/projected/34148105-3978-44c8-a28f-ac93b2e4ed06-kube-api-access-6g6s5\") pod \"ceilometer-0\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " pod="openstack/ceilometer-0" Nov 29 04:32:26 crc kubenswrapper[4631]: I1129 04:32:26.028226 4631 scope.go:117] "RemoveContainer" containerID="109851c2b06aa6405f17ffa5b6c6c860f37b6b1491f8dd602a7dae95ebceef70" Nov 29 04:32:26 crc kubenswrapper[4631]: I1129 04:32:26.029092 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:32:26 crc kubenswrapper[4631]: I1129 04:32:26.572605 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:26 crc kubenswrapper[4631]: I1129 04:32:26.679304 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0b904c4b-913c-4fc2-8037-94300918d367","Type":"ContainerStarted","Data":"1980f9f3c5396ca96ade1367971c199aac58755bea94e119618c80595655dd89"} Nov 29 04:32:26 crc kubenswrapper[4631]: I1129 04:32:26.689093 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerStarted","Data":"884204bc72397b816d8462d278364201fffdaeaa3620abc148eb9e494b15f91f"} Nov 29 04:32:27 crc kubenswrapper[4631]: I1129 04:32:27.244688 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84d7e0f9-cbce-46f7-ba28-205eb1867850" path="/var/lib/kubelet/pods/84d7e0f9-cbce-46f7-ba28-205eb1867850/volumes" Nov 29 04:32:27 crc kubenswrapper[4631]: I1129 04:32:27.712610 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0b904c4b-913c-4fc2-8037-94300918d367","Type":"ContainerStarted","Data":"938f1d6b9ad22d5223774f10928d53c25972ceda6a5eb378f1cf4f81fb45e889"} Nov 29 04:32:27 crc kubenswrapper[4631]: I1129 04:32:27.712886 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0b904c4b-913c-4fc2-8037-94300918d367","Type":"ContainerStarted","Data":"c3a895d635b80aafc2fbd191ff7ea1a6406f460c3b43002a84eb8d296fb8f87b"} Nov 29 04:32:27 crc kubenswrapper[4631]: I1129 04:32:27.714622 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerStarted","Data":"d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02"} Nov 29 04:32:27 crc kubenswrapper[4631]: I1129 04:32:27.737670 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.737643353 podStartE2EDuration="3.737643353s" podCreationTimestamp="2025-11-29 04:32:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:32:27.727937586 +0000 UTC m=+1274.792441100" watchObservedRunningTime="2025-11-29 04:32:27.737643353 +0000 UTC m=+1274.802146877" Nov 29 04:32:28 crc kubenswrapper[4631]: I1129 04:32:28.727829 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerStarted","Data":"5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8"} Nov 29 04:32:29 crc kubenswrapper[4631]: I1129 04:32:29.746552 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerStarted","Data":"15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359"} Nov 29 04:32:30 crc kubenswrapper[4631]: I1129 04:32:30.837508 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:30 crc kubenswrapper[4631]: I1129 04:32:30.837812 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:30 
crc kubenswrapper[4631]: I1129 04:32:30.879574 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:30 crc kubenswrapper[4631]: I1129 04:32:30.902277 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:31 crc kubenswrapper[4631]: I1129 04:32:31.765766 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:31 crc kubenswrapper[4631]: I1129 04:32:31.766175 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:33 crc kubenswrapper[4631]: I1129 04:32:33.141414 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Nov 29 04:32:35 crc kubenswrapper[4631]: I1129 04:32:35.171224 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 29 04:32:35 crc kubenswrapper[4631]: I1129 04:32:35.171972 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 29 04:32:35 crc kubenswrapper[4631]: I1129 04:32:35.231160 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 29 04:32:35 crc kubenswrapper[4631]: I1129 04:32:35.245368 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 29 04:32:35 crc kubenswrapper[4631]: I1129 04:32:35.805381 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 29 04:32:35 crc kubenswrapper[4631]: I1129 04:32:35.805422 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 29 04:32:36 crc kubenswrapper[4631]: I1129 04:32:36.378134 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:36 crc kubenswrapper[4631]: I1129 04:32:36.378482 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:32:36 crc kubenswrapper[4631]: I1129 04:32:36.391578 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 29 04:32:37 crc kubenswrapper[4631]: I1129 04:32:37.935180 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 29 04:32:37 crc kubenswrapper[4631]: I1129 04:32:37.936417 4631 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 29 04:32:37 crc kubenswrapper[4631]: I1129 04:32:37.966863 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 29 04:32:40 crc kubenswrapper[4631]: I1129 04:32:40.856937 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-cb52v" event={"ID":"e4a6498b-07c7-4d19-b5a3-49773fa023a7","Type":"ContainerStarted","Data":"100c6e5c68f68bdeefe2f7b5ce33c8a3b8ac9bb9df2b102c7cfda3bac6b9b029"} Nov 29 04:32:40 crc 
kubenswrapper[4631]: I1129 04:32:40.862129 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerStarted","Data":"bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b"} Nov 29 04:32:40 crc kubenswrapper[4631]: I1129 04:32:40.863071 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 04:32:40 crc kubenswrapper[4631]: I1129 04:32:40.904419 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.367633621 podStartE2EDuration="15.904397941s" podCreationTimestamp="2025-11-29 04:32:25 +0000 UTC" firstStartedPulling="2025-11-29 04:32:26.588399619 +0000 UTC m=+1273.652903123" lastFinishedPulling="2025-11-29 04:32:40.125163929 +0000 UTC m=+1287.189667443" observedRunningTime="2025-11-29 04:32:40.896135319 +0000 UTC m=+1287.960638833" watchObservedRunningTime="2025-11-29 04:32:40.904397941 +0000 UTC m=+1287.968901455" Nov 29 04:32:40 crc kubenswrapper[4631]: I1129 04:32:40.906251 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-cb52v" podStartSLOduration=2.301250002 podStartE2EDuration="16.906245286s" podCreationTimestamp="2025-11-29 04:32:24 +0000 UTC" firstStartedPulling="2025-11-29 04:32:25.519593211 +0000 UTC m=+1272.584096725" lastFinishedPulling="2025-11-29 04:32:40.124588495 +0000 UTC m=+1287.189092009" observedRunningTime="2025-11-29 04:32:40.87612542 +0000 UTC m=+1287.940628974" watchObservedRunningTime="2025-11-29 04:32:40.906245286 +0000 UTC m=+1287.970748800" Nov 29 04:32:43 crc kubenswrapper[4631]: I1129 04:32:43.142196 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-76fdc69464-qvs2b" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Nov 29 04:32:43 crc kubenswrapper[4631]: I1129 04:32:43.143692 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:32:48 crc kubenswrapper[4631]: I1129 04:32:48.861417 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:48 crc kubenswrapper[4631]: I1129 04:32:48.862614 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="proxy-httpd" containerID="cri-o://bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b" gracePeriod=30 Nov 29 04:32:48 crc kubenswrapper[4631]: I1129 04:32:48.862722 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="ceilometer-notification-agent" containerID="cri-o://5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8" gracePeriod=30 Nov 29 04:32:48 crc kubenswrapper[4631]: I1129 04:32:48.862660 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="sg-core" containerID="cri-o://15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359" gracePeriod=30 Nov 29 04:32:48 crc kubenswrapper[4631]: I1129 04:32:48.862944 4631 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/ceilometer-0" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="ceilometer-central-agent" containerID="cri-o://d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02" gracePeriod=30 Nov 29 04:32:49 crc kubenswrapper[4631]: I1129 04:32:49.952837 4631 generic.go:334] "Generic (PLEG): container finished" podID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerID="bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b" exitCode=0 Nov 29 04:32:49 crc kubenswrapper[4631]: I1129 04:32:49.953107 4631 generic.go:334] "Generic (PLEG): container finished" podID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerID="15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359" exitCode=2 Nov 29 04:32:49 crc kubenswrapper[4631]: I1129 04:32:49.953118 4631 generic.go:334] "Generic (PLEG): container finished" podID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerID="d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02" exitCode=0 Nov 29 04:32:49 crc kubenswrapper[4631]: I1129 04:32:49.952930 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerDied","Data":"bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b"} Nov 29 04:32:49 crc kubenswrapper[4631]: I1129 04:32:49.953158 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerDied","Data":"15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359"} Nov 29 04:32:49 crc kubenswrapper[4631]: I1129 04:32:49.953177 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerDied","Data":"d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02"} Nov 29 04:32:50 crc kubenswrapper[4631]: I1129 04:32:50.985817 4631 generic.go:334] "Generic (PLEG): container finished" podID="e4a6498b-07c7-4d19-b5a3-49773fa023a7" containerID="100c6e5c68f68bdeefe2f7b5ce33c8a3b8ac9bb9df2b102c7cfda3bac6b9b029" exitCode=0 Nov 29 04:32:50 crc kubenswrapper[4631]: I1129 04:32:50.985894 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-cb52v" event={"ID":"e4a6498b-07c7-4d19-b5a3-49773fa023a7","Type":"ContainerDied","Data":"100c6e5c68f68bdeefe2f7b5ce33c8a3b8ac9bb9df2b102c7cfda3bac6b9b029"} Nov 29 04:32:51 crc kubenswrapper[4631]: I1129 04:32:51.986194 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:32:51 crc kubenswrapper[4631]: I1129 04:32:51.996712 4631 generic.go:334] "Generic (PLEG): container finished" podID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerID="5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8" exitCode=0 Nov 29 04:32:51 crc kubenswrapper[4631]: I1129 04:32:51.996785 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerDied","Data":"5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8"} Nov 29 04:32:51 crc kubenswrapper[4631]: I1129 04:32:51.996811 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"34148105-3978-44c8-a28f-ac93b2e4ed06","Type":"ContainerDied","Data":"884204bc72397b816d8462d278364201fffdaeaa3620abc148eb9e494b15f91f"} Nov 29 04:32:51 crc kubenswrapper[4631]: I1129 04:32:51.996843 4631 scope.go:117] "RemoveContainer" containerID="bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b" Nov 29 04:32:51 crc kubenswrapper[4631]: I1129 04:32:51.997075 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.001385 4631 generic.go:334] "Generic (PLEG): container finished" podID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerID="b8821366a8d7bbbc829496fae08b6db06349f21d4ce52f873b767b5bf290f050" exitCode=137 Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.001434 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76fdc69464-qvs2b" event={"ID":"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14","Type":"ContainerDied","Data":"b8821366a8d7bbbc829496fae08b6db06349f21d4ce52f873b767b5bf290f050"} Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.018957 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-log-httpd\") pod \"34148105-3978-44c8-a28f-ac93b2e4ed06\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.019007 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6s5\" (UniqueName: \"kubernetes.io/projected/34148105-3978-44c8-a28f-ac93b2e4ed06-kube-api-access-6g6s5\") pod \"34148105-3978-44c8-a28f-ac93b2e4ed06\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.019230 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-combined-ca-bundle\") pod \"34148105-3978-44c8-a28f-ac93b2e4ed06\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.019248 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-scripts\") pod \"34148105-3978-44c8-a28f-ac93b2e4ed06\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.019303 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-config-data\") pod \"34148105-3978-44c8-a28f-ac93b2e4ed06\" (UID: 
\"34148105-3978-44c8-a28f-ac93b2e4ed06\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.019390 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-run-httpd\") pod \"34148105-3978-44c8-a28f-ac93b2e4ed06\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.019508 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-sg-core-conf-yaml\") pod \"34148105-3978-44c8-a28f-ac93b2e4ed06\" (UID: \"34148105-3978-44c8-a28f-ac93b2e4ed06\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.025063 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "34148105-3978-44c8-a28f-ac93b2e4ed06" (UID: "34148105-3978-44c8-a28f-ac93b2e4ed06"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.026879 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "34148105-3978-44c8-a28f-ac93b2e4ed06" (UID: "34148105-3978-44c8-a28f-ac93b2e4ed06"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.034817 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34148105-3978-44c8-a28f-ac93b2e4ed06-kube-api-access-6g6s5" (OuterVolumeSpecName: "kube-api-access-6g6s5") pod "34148105-3978-44c8-a28f-ac93b2e4ed06" (UID: "34148105-3978-44c8-a28f-ac93b2e4ed06"). InnerVolumeSpecName "kube-api-access-6g6s5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.040083 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-scripts" (OuterVolumeSpecName: "scripts") pod "34148105-3978-44c8-a28f-ac93b2e4ed06" (UID: "34148105-3978-44c8-a28f-ac93b2e4ed06"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.084283 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.095979 4631 scope.go:117] "RemoveContainer" containerID="15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.107511 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "34148105-3978-44c8-a28f-ac93b2e4ed06" (UID: "34148105-3978-44c8-a28f-ac93b2e4ed06"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.123661 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-config-data\") pod \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.123739 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-scripts\") pod \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.123764 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-secret-key\") pod \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.123808 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-logs\") pod \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.123886 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-tls-certs\") pod \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.123958 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-combined-ca-bundle\") pod \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.124036 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xprll\" (UniqueName: \"kubernetes.io/projected/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-kube-api-access-xprll\") pod \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\" (UID: \"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.124419 4631 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.124430 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6s5\" (UniqueName: \"kubernetes.io/projected/34148105-3978-44c8-a28f-ac93b2e4ed06-kube-api-access-6g6s5\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.124440 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.124447 4631 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/34148105-3978-44c8-a28f-ac93b2e4ed06-run-httpd\") on node \"crc\" 
DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.124456 4631 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.126404 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-logs" (OuterVolumeSpecName: "logs") pod "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" (UID: "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.130211 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-kube-api-access-xprll" (OuterVolumeSpecName: "kube-api-access-xprll") pod "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" (UID: "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14"). InnerVolumeSpecName "kube-api-access-xprll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.130382 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" (UID: "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.149318 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-config-data" (OuterVolumeSpecName: "config-data") pod "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" (UID: "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.156124 4631 scope.go:117] "RemoveContainer" containerID="5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.174884 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-scripts" (OuterVolumeSpecName: "scripts") pod "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" (UID: "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.176582 4631 scope.go:117] "RemoveContainer" containerID="d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.176747 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" (UID: "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.183117 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34148105-3978-44c8-a28f-ac93b2e4ed06" (UID: "34148105-3978-44c8-a28f-ac93b2e4ed06"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.203015 4631 scope.go:117] "RemoveContainer" containerID="bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.203888 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b\": container with ID starting with bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b not found: ID does not exist" containerID="bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.203942 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b"} err="failed to get container status \"bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b\": rpc error: code = NotFound desc = could not find container \"bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b\": container with ID starting with bcc845662fe1e42551af0fc7f4f0f14ce3b9925bd593bc0934b8390a16023f5b not found: ID does not exist" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.203975 4631 scope.go:117] "RemoveContainer" containerID="15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.204561 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359\": container with ID starting with 15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359 not found: ID does not exist" containerID="15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.204587 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359"} err="failed to get container status \"15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359\": rpc error: code = NotFound desc = could not find container \"15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359\": container with ID starting with 15d91514e69e879b4b217d4af41827f4a3e3fc894589d8d8f91f1d069675d359 not found: ID does not exist" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.204606 4631 scope.go:117] "RemoveContainer" containerID="5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.205059 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8\": container with ID starting with 5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8 not found: ID does not exist" 
containerID="5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.205094 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8"} err="failed to get container status \"5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8\": rpc error: code = NotFound desc = could not find container \"5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8\": container with ID starting with 5a42d94c551e79e3b53bc076de36adcb434386c7f9e75aed8f91ebaecb51a8e8 not found: ID does not exist" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.205121 4631 scope.go:117] "RemoveContainer" containerID="d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.205338 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02\": container with ID starting with d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02 not found: ID does not exist" containerID="d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.205353 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02"} err="failed to get container status \"d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02\": rpc error: code = NotFound desc = could not find container \"d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02\": container with ID starting with d78dcb7d1a71fbd2f45ec1c323a8ee6c1e923f2158ea31acfbd666caf4921e02 not found: ID does not exist" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.210736 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-config-data" (OuterVolumeSpecName: "config-data") pod "34148105-3978-44c8-a28f-ac93b2e4ed06" (UID: "34148105-3978-44c8-a28f-ac93b2e4ed06"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.212275 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" (UID: "4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.228186 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.228206 4631 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.228216 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34148105-3978-44c8-a28f-ac93b2e4ed06-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.228226 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.228235 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xprll\" (UniqueName: \"kubernetes.io/projected/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-kube-api-access-xprll\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.228244 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.228252 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.228260 4631 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.228268 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.291477 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.329802 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-combined-ca-bundle\") pod \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.329905 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nprs5\" (UniqueName: \"kubernetes.io/projected/e4a6498b-07c7-4d19-b5a3-49773fa023a7-kube-api-access-nprs5\") pod \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.330001 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-config-data\") pod \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.330124 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-scripts\") pod \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\" (UID: \"e4a6498b-07c7-4d19-b5a3-49773fa023a7\") " Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.346556 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-scripts" (OuterVolumeSpecName: "scripts") pod "e4a6498b-07c7-4d19-b5a3-49773fa023a7" (UID: "e4a6498b-07c7-4d19-b5a3-49773fa023a7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.349547 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4a6498b-07c7-4d19-b5a3-49773fa023a7-kube-api-access-nprs5" (OuterVolumeSpecName: "kube-api-access-nprs5") pod "e4a6498b-07c7-4d19-b5a3-49773fa023a7" (UID: "e4a6498b-07c7-4d19-b5a3-49773fa023a7"). InnerVolumeSpecName "kube-api-access-nprs5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.375062 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.397651 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.413608 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4a6498b-07c7-4d19-b5a3-49773fa023a7" (UID: "e4a6498b-07c7-4d19-b5a3-49773fa023a7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.415534 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.416012 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.416047 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.416059 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="ceilometer-notification-agent" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.416066 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="ceilometer-notification-agent" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.416076 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="sg-core" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.416082 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="sg-core" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.416094 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon-log" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.416100 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon-log" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.416130 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="ceilometer-central-agent" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.416137 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="ceilometer-central-agent" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.416144 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="proxy-httpd" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.416152 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="proxy-httpd" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.416168 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.416176 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" Nov 29 04:32:52 crc kubenswrapper[4631]: E1129 04:32:52.416209 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4a6498b-07c7-4d19-b5a3-49773fa023a7" containerName="nova-cell0-conductor-db-sync" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.416218 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4a6498b-07c7-4d19-b5a3-49773fa023a7" containerName="nova-cell0-conductor-db-sync" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.417101 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" 
containerName="ceilometer-central-agent" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.417125 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="ceilometer-notification-agent" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.417137 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon-log" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.417148 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="proxy-httpd" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.417157 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.417165 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4a6498b-07c7-4d19-b5a3-49773fa023a7" containerName="nova-cell0-conductor-db-sync" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.417174 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" containerName="horizon" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.417184 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" containerName="sg-core" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.419804 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.422219 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.424771 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.424958 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.429870 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-config-data" (OuterVolumeSpecName: "config-data") pod "e4a6498b-07c7-4d19-b5a3-49773fa023a7" (UID: "e4a6498b-07c7-4d19-b5a3-49773fa023a7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.441893 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.441912 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.441920 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4a6498b-07c7-4d19-b5a3-49773fa023a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.441931 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nprs5\" (UniqueName: \"kubernetes.io/projected/e4a6498b-07c7-4d19-b5a3-49773fa023a7-kube-api-access-nprs5\") on node \"crc\" DevicePath \"\"" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.543680 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-log-httpd\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.543723 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.543747 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-scripts\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.543769 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-run-httpd\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.543861 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-config-data\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.543903 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.543918 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngl2f\" (UniqueName: 
\"kubernetes.io/projected/f7b82554-0254-4b14-8983-e70c42a48315-kube-api-access-ngl2f\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.645577 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.645619 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-scripts\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.645639 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-run-httpd\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.645667 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-config-data\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.645697 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.645715 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngl2f\" (UniqueName: \"kubernetes.io/projected/f7b82554-0254-4b14-8983-e70c42a48315-kube-api-access-ngl2f\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.645806 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-log-httpd\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.646181 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-log-httpd\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.646937 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-run-httpd\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.652075 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.652076 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-scripts\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.652266 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-config-data\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.652547 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.663233 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngl2f\" (UniqueName: \"kubernetes.io/projected/f7b82554-0254-4b14-8983-e70c42a48315-kube-api-access-ngl2f\") pod \"ceilometer-0\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " pod="openstack/ceilometer-0" Nov 29 04:32:52 crc kubenswrapper[4631]: I1129 04:32:52.805031 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.022990 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-cb52v" event={"ID":"e4a6498b-07c7-4d19-b5a3-49773fa023a7","Type":"ContainerDied","Data":"3ddc5edb6f73cb73f4a514cb2ba9f89e2cac940376430c0df430d4a523eaab06"} Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.023358 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ddc5edb6f73cb73f4a514cb2ba9f89e2cac940376430c0df430d4a523eaab06" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.023428 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-cb52v" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.036223 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-76fdc69464-qvs2b" event={"ID":"4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14","Type":"ContainerDied","Data":"cb412ed9a6866664db61f196fb7f78357a51c0fb01b19d75336d0942e33b7106"} Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.036295 4631 scope.go:117] "RemoveContainer" containerID="229305a7e7722d15999a5144c7f2a46b050e7800a04c60576976efcb74cd5340" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.036432 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-76fdc69464-qvs2b" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.105734 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-76fdc69464-qvs2b"] Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.117417 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-76fdc69464-qvs2b"] Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.130635 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.131879 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.143801 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.144106 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-m2b45" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.147947 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.170440 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.233178 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34148105-3978-44c8-a28f-ac93b2e4ed06" path="/var/lib/kubelet/pods/34148105-3978-44c8-a28f-ac93b2e4ed06/volumes" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.234672 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14" path="/var/lib/kubelet/pods/4cf4c6c5-96a8-4e0a-b00d-0c75a9acce14/volumes" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.277638 4631 scope.go:117] "RemoveContainer" containerID="b8821366a8d7bbbc829496fae08b6db06349f21d4ce52f873b767b5bf290f050" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.285609 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbqc9\" (UniqueName: \"kubernetes.io/projected/2c6f9871-305a-473f-8610-475ad792012a-kube-api-access-jbqc9\") pod \"nova-cell0-conductor-0\" (UID: \"2c6f9871-305a-473f-8610-475ad792012a\") " pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.285733 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6f9871-305a-473f-8610-475ad792012a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2c6f9871-305a-473f-8610-475ad792012a\") " pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.285780 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6f9871-305a-473f-8610-475ad792012a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2c6f9871-305a-473f-8610-475ad792012a\") " pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: W1129 04:32:53.289446 4631 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7b82554_0254_4b14_8983_e70c42a48315.slice/crio-425b2264648533413221a1781856dbd45278d72e268a38d0fae89f0873cb36bf WatchSource:0}: Error finding container 425b2264648533413221a1781856dbd45278d72e268a38d0fae89f0873cb36bf: Status 404 returned error can't find the container with id 425b2264648533413221a1781856dbd45278d72e268a38d0fae89f0873cb36bf Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.387396 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbqc9\" (UniqueName: \"kubernetes.io/projected/2c6f9871-305a-473f-8610-475ad792012a-kube-api-access-jbqc9\") pod \"nova-cell0-conductor-0\" (UID: \"2c6f9871-305a-473f-8610-475ad792012a\") " pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.387562 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6f9871-305a-473f-8610-475ad792012a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2c6f9871-305a-473f-8610-475ad792012a\") " pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.387616 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6f9871-305a-473f-8610-475ad792012a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2c6f9871-305a-473f-8610-475ad792012a\") " pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.394199 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6f9871-305a-473f-8610-475ad792012a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2c6f9871-305a-473f-8610-475ad792012a\") " pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.396211 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6f9871-305a-473f-8610-475ad792012a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2c6f9871-305a-473f-8610-475ad792012a\") " pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.406935 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbqc9\" (UniqueName: \"kubernetes.io/projected/2c6f9871-305a-473f-8610-475ad792012a-kube-api-access-jbqc9\") pod \"nova-cell0-conductor-0\" (UID: \"2c6f9871-305a-473f-8610-475ad792012a\") " pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.491731 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:53 crc kubenswrapper[4631]: I1129 04:32:53.980935 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 29 04:32:54 crc kubenswrapper[4631]: I1129 04:32:54.048166 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2c6f9871-305a-473f-8610-475ad792012a","Type":"ContainerStarted","Data":"f5ad8175859434560d5fac69670353d5415f057e4bd4f28d357766f195603fe7"} Nov 29 04:32:54 crc kubenswrapper[4631]: I1129 04:32:54.052891 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerStarted","Data":"425b2264648533413221a1781856dbd45278d72e268a38d0fae89f0873cb36bf"} Nov 29 04:32:55 crc kubenswrapper[4631]: I1129 04:32:55.065087 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2c6f9871-305a-473f-8610-475ad792012a","Type":"ContainerStarted","Data":"16976a9ad582988f7e4ba3fae43db22c1fe8e013b67b5db5349a7226ab3e4989"} Nov 29 04:32:55 crc kubenswrapper[4631]: I1129 04:32:55.065828 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 29 04:32:55 crc kubenswrapper[4631]: I1129 04:32:55.067804 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerStarted","Data":"eb8d793ae7675e1991a30808202976cc94ca73a2ef52e177344c7128b23ff3ec"} Nov 29 04:32:55 crc kubenswrapper[4631]: I1129 04:32:55.085972 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.085955927 podStartE2EDuration="2.085955927s" podCreationTimestamp="2025-11-29 04:32:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:32:55.0852442 +0000 UTC m=+1302.149747734" watchObservedRunningTime="2025-11-29 04:32:55.085955927 +0000 UTC m=+1302.150459431" Nov 29 04:32:56 crc kubenswrapper[4631]: I1129 04:32:56.079669 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerStarted","Data":"2d89503d2a3f51fcceb3f6f7b8266ba5f977719bf144c8248bd2ce9f7450df5c"} Nov 29 04:32:59 crc kubenswrapper[4631]: I1129 04:32:59.119892 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerStarted","Data":"60d2797809b903548b97779c909345691dc7d36bce26c2841cc3c08f714b5c7d"} Nov 29 04:33:03 crc kubenswrapper[4631]: I1129 04:33:03.546387 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 29 04:33:04 crc kubenswrapper[4631]: I1129 04:33:04.167206 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerStarted","Data":"d295df1ad8b80ceb227232d22e1c0e9b4d8bd98ddadd7eabfa9dcd17c3c045c9"} Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.179503 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.217967 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ceilometer-0" podStartSLOduration=3.912226184 podStartE2EDuration="13.217942156s" podCreationTimestamp="2025-11-29 04:32:52 +0000 UTC" firstStartedPulling="2025-11-29 04:32:53.295808599 +0000 UTC m=+1300.360312113" lastFinishedPulling="2025-11-29 04:33:02.601524541 +0000 UTC m=+1309.666028085" observedRunningTime="2025-11-29 04:33:05.212947914 +0000 UTC m=+1312.277451438" watchObservedRunningTime="2025-11-29 04:33:05.217942156 +0000 UTC m=+1312.282445690" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.521837 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-4vzq5"] Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.524581 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.527161 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.534233 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.596189 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-4vzq5"] Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.640101 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-config-data\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.640135 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-scripts\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.640161 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6q84\" (UniqueName: \"kubernetes.io/projected/7da671c0-0a91-41f8-9c7c-a128b5f080d4-kube-api-access-p6q84\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.640219 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.668795 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.688340 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.688443 4631 util.go:30] "No sandbox for pod can be found. 
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.688443 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.693640 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.742437 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-config-data\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.742479 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-scripts\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.742502 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6q84\" (UniqueName: \"kubernetes.io/projected/7da671c0-0a91-41f8-9c7c-a128b5f080d4-kube-api-access-p6q84\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.742541 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m46qq\" (UniqueName: \"kubernetes.io/projected/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-kube-api-access-m46qq\") pod \"nova-scheduler-0\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " pod="openstack/nova-scheduler-0"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.742591 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.742645 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " pod="openstack/nova-scheduler-0"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.742662 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-config-data\") pod \"nova-scheduler-0\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " pod="openstack/nova-scheduler-0"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.772514 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-config-data\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5"
Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.775676 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.776309 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-scripts\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.776853 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6q84\" (UniqueName: \"kubernetes.io/projected/7da671c0-0a91-41f8-9c7c-a128b5f080d4-kube-api-access-p6q84\") pod \"nova-cell0-cell-mapping-4vzq5\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.787802 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.789289 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.791696 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.806646 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.807878 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.817169 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.832948 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.841465 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.846397 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.846446 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-config-data\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.846551 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m46qq\" (UniqueName: \"kubernetes.io/projected/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-kube-api-access-m46qq\") pod \"nova-scheduler-0\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.846570 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-mh2ks\" (UniqueName: \"kubernetes.io/projected/674320ff-f1bc-464e-b962-1118dad1c0e4-kube-api-access-mh2ks\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.846603 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/674320ff-f1bc-464e-b962-1118dad1c0e4-logs\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.846668 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.846683 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-config-data\") pod \"nova-scheduler-0\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.853117 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.858756 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-config-data\") pod \"nova-scheduler-0\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.885740 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.907640 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m46qq\" (UniqueName: \"kubernetes.io/projected/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-kube-api-access-m46qq\") pod \"nova-scheduler-0\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.918946 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.949408 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.951556 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-config-data\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.951592 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.951667 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh2ks\" (UniqueName: \"kubernetes.io/projected/674320ff-f1bc-464e-b962-1118dad1c0e4-kube-api-access-mh2ks\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.951684 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wxxc\" (UniqueName: \"kubernetes.io/projected/293c4669-4e62-4437-9076-b24ce3b5bc02-kube-api-access-6wxxc\") pod \"nova-cell1-novncproxy-0\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.951720 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/674320ff-f1bc-464e-b962-1118dad1c0e4-logs\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.951752 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.951801 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.952760 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/674320ff-f1bc-464e-b962-1118dad1c0e4-logs\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.959409 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 29 04:33:05 crc kubenswrapper[4631]: I1129 04:33:05.996700 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.005842 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh2ks\" (UniqueName: 
\"kubernetes.io/projected/674320ff-f1bc-464e-b962-1118dad1c0e4-kube-api-access-mh2ks\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.014367 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.017190 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-config-data\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.036960 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " pod="openstack/nova-api-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.058532 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.058607 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.058675 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wxxc\" (UniqueName: \"kubernetes.io/projected/293c4669-4e62-4437-9076-b24ce3b5bc02-kube-api-access-6wxxc\") pod \"nova-cell1-novncproxy-0\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.058691 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rn5s\" (UniqueName: \"kubernetes.io/projected/485ad445-4582-4933-9d89-ff9457805da0-kube-api-access-9rn5s\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.058712 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-config-data\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.058730 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485ad445-4582-4933-9d89-ff9457805da0-logs\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.058765 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.063482 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.068728 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.099539 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2mddc"] Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.113915 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wxxc\" (UniqueName: \"kubernetes.io/projected/293c4669-4e62-4437-9076-b24ce3b5bc02-kube-api-access-6wxxc\") pod \"nova-cell1-novncproxy-0\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.121457 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.160790 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rn5s\" (UniqueName: \"kubernetes.io/projected/485ad445-4582-4933-9d89-ff9457805da0-kube-api-access-9rn5s\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.160845 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-config-data\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.160867 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485ad445-4582-4933-9d89-ff9457805da0-logs\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.160921 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-svc\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.160963 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-config\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc 
kubenswrapper[4631]: I1129 04:33:06.161002 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.161047 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.161085 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.161146 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.161340 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klskj\" (UniqueName: \"kubernetes.io/projected/005dc9bc-563c-460c-9c82-2203b2512a69-kube-api-access-klskj\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.163212 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.163984 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485ad445-4582-4933-9d89-ff9457805da0-logs\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.176465 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2mddc"] Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.190563 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-config-data\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.209117 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rn5s\" (UniqueName: \"kubernetes.io/projected/485ad445-4582-4933-9d89-ff9457805da0-kube-api-access-9rn5s\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.231090 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.263514 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-config\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.264591 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-config\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.264682 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.264727 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.264794 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 
04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.264816 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klskj\" (UniqueName: \"kubernetes.io/projected/005dc9bc-563c-460c-9c82-2203b2512a69-kube-api-access-klskj\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.264911 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-svc\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.265582 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-svc\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.266533 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.268639 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.269238 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.282772 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klskj\" (UniqueName: \"kubernetes.io/projected/005dc9bc-563c-460c-9c82-2203b2512a69-kube-api-access-klskj\") pod \"dnsmasq-dns-757b4f8459-2mddc\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.297112 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.320689 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.510073 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:06 crc kubenswrapper[4631]: I1129 04:33:06.685766 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-4vzq5"] Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.850324 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.850663 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4vzq5" event={"ID":"7da671c0-0a91-41f8-9c7c-a128b5f080d4","Type":"ContainerStarted","Data":"fb0c9913a9213ee541ecced8e0af59d1895588ee70fd213ee0a94701842612e7"} Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.850694 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9mvkm"] Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.851976 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9mvkm"] Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.851996 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.852008 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.852017 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2mddc"] Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.852088 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.855785 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.856011 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 29 04:33:07 crc kubenswrapper[4631]: W1129 04:33:07.901181 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod485ad445_4582_4933_9d89_ff9457805da0.slice/crio-049dae8be79d2e5fae437c65093d6b3c7c0a633420478d96b0ab8c52e71a62c6 WatchSource:0}: Error finding container 049dae8be79d2e5fae437c65093d6b3c7c0a633420478d96b0ab8c52e71a62c6: Status 404 returned error can't find the container with id 049dae8be79d2e5fae437c65093d6b3c7c0a633420478d96b0ab8c52e71a62c6 Nov 29 04:33:07 crc kubenswrapper[4631]: W1129 04:33:07.939059 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod674320ff_f1bc_464e_b962_1118dad1c0e4.slice/crio-8e8a37508fe781fa85baf96bd95b5997b2151b3ac2736d7f832dc6fb374db84c WatchSource:0}: Error finding container 8e8a37508fe781fa85baf96bd95b5997b2151b3ac2736d7f832dc6fb374db84c: Status 404 returned error can't find the container with id 8e8a37508fe781fa85baf96bd95b5997b2151b3ac2736d7f832dc6fb374db84c Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.964432 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.973364 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-scripts\") pod 
\"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.973582 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-config-data\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.973692 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpclm\" (UniqueName: \"kubernetes.io/projected/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-kube-api-access-dpclm\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:07 crc kubenswrapper[4631]: I1129 04:33:07.973839 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.075078 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-config-data\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.075381 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpclm\" (UniqueName: \"kubernetes.io/projected/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-kube-api-access-dpclm\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.075821 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.075995 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-scripts\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.081087 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.081697 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-config-data\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.082534 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-scripts\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.098636 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpclm\" (UniqueName: \"kubernetes.io/projected/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-kube-api-access-dpclm\") pod \"nova-cell1-conductor-db-sync-9mvkm\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.172142 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.340861 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"293c4669-4e62-4437-9076-b24ce3b5bc02","Type":"ContainerStarted","Data":"95e82f8948f1e493620eff7e5ee5061b4608caf30fadd9560cc4f874e0da2148"} Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.344488 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" event={"ID":"005dc9bc-563c-460c-9c82-2203b2512a69","Type":"ContainerStarted","Data":"8c3013c1d2108f14efb4bf6cef8ce75ebde60d6d9b11ad635112da4eba8e16ec"} Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.346143 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"674320ff-f1bc-464e-b962-1118dad1c0e4","Type":"ContainerStarted","Data":"8e8a37508fe781fa85baf96bd95b5997b2151b3ac2736d7f832dc6fb374db84c"} Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.348520 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ba30fb10-ed90-4538-ae0d-d041d9e74dcd","Type":"ContainerStarted","Data":"935cfe9b08a55807adfea35e9433742e5980b98597eeca58de31067f10dda1e4"} Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.349711 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485ad445-4582-4933-9d89-ff9457805da0","Type":"ContainerStarted","Data":"049dae8be79d2e5fae437c65093d6b3c7c0a633420478d96b0ab8c52e71a62c6"} Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.384119 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-4vzq5" podStartSLOduration=3.384099774 podStartE2EDuration="3.384099774s" podCreationTimestamp="2025-11-29 04:33:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:08.379777249 +0000 UTC m=+1315.444280753" watchObservedRunningTime="2025-11-29 04:33:08.384099774 +0000 UTC m=+1315.448603288" Nov 29 04:33:08 crc kubenswrapper[4631]: I1129 04:33:08.605136 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9mvkm"] Nov 29 04:33:09 crc kubenswrapper[4631]: I1129 04:33:09.369786 
4631 generic.go:334] "Generic (PLEG): container finished" podID="005dc9bc-563c-460c-9c82-2203b2512a69" containerID="5b36d023aa70f7b9e041c78fe1993f8f2e0a861a2ee66b7f6d37be7b10111450" exitCode=0 Nov 29 04:33:09 crc kubenswrapper[4631]: I1129 04:33:09.371013 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" event={"ID":"005dc9bc-563c-460c-9c82-2203b2512a69","Type":"ContainerDied","Data":"5b36d023aa70f7b9e041c78fe1993f8f2e0a861a2ee66b7f6d37be7b10111450"} Nov 29 04:33:09 crc kubenswrapper[4631]: I1129 04:33:09.375921 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9mvkm" event={"ID":"b7e732a2-0a21-4bd8-af75-1ac34236fa2d","Type":"ContainerStarted","Data":"9358c3a7d53991a3e5272bf14397fb1d5550a01dd56b7b2b7cfc9f7a881b0bfe"} Nov 29 04:33:09 crc kubenswrapper[4631]: I1129 04:33:09.375949 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9mvkm" event={"ID":"b7e732a2-0a21-4bd8-af75-1ac34236fa2d","Type":"ContainerStarted","Data":"8decc75382a662293f9832d62c4857457fe0befbc2f7aeac9aa07fbe23f6af56"} Nov 29 04:33:09 crc kubenswrapper[4631]: I1129 04:33:09.384495 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4vzq5" event={"ID":"7da671c0-0a91-41f8-9c7c-a128b5f080d4","Type":"ContainerStarted","Data":"aa6e96e21d1993f9690106968d034fd343e937c4e57d7712794aec9ed5083386"} Nov 29 04:33:09 crc kubenswrapper[4631]: I1129 04:33:09.420506 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-9mvkm" podStartSLOduration=2.419607558 podStartE2EDuration="2.419607558s" podCreationTimestamp="2025-11-29 04:33:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:09.413006777 +0000 UTC m=+1316.477510291" watchObservedRunningTime="2025-11-29 04:33:09.419607558 +0000 UTC m=+1316.484111062" Nov 29 04:33:10 crc kubenswrapper[4631]: I1129 04:33:10.394046 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" event={"ID":"005dc9bc-563c-460c-9c82-2203b2512a69","Type":"ContainerStarted","Data":"a0b1bd5c274c1934b738fca3d643c1f6bd6d0e8b4c57f7be1c79e36ce4f822cf"} Nov 29 04:33:10 crc kubenswrapper[4631]: I1129 04:33:10.414893 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" podStartSLOduration=5.414877168 podStartE2EDuration="5.414877168s" podCreationTimestamp="2025-11-29 04:33:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:10.408400849 +0000 UTC m=+1317.472904363" watchObservedRunningTime="2025-11-29 04:33:10.414877168 +0000 UTC m=+1317.479380682" Nov 29 04:33:10 crc kubenswrapper[4631]: I1129 04:33:10.663918 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 04:33:10 crc kubenswrapper[4631]: I1129 04:33:10.756792 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:11 crc kubenswrapper[4631]: I1129 04:33:11.405990 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:16 crc kubenswrapper[4631]: I1129 04:33:16.512208 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:33:16 crc kubenswrapper[4631]: I1129 04:33:16.572632 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-hhxk2"] Nov 29 04:33:16 crc kubenswrapper[4631]: I1129 04:33:16.572916 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" podUID="108ade1d-ac82-4183-99eb-761b54886da9" containerName="dnsmasq-dns" containerID="cri-o://a5fbcc4c5d1d2bfb9e698ae2a45f79c748484820fe6b9822f2930f1aea302732" gracePeriod=10 Nov 29 04:33:17 crc kubenswrapper[4631]: I1129 04:33:17.469424 4631 generic.go:334] "Generic (PLEG): container finished" podID="108ade1d-ac82-4183-99eb-761b54886da9" containerID="a5fbcc4c5d1d2bfb9e698ae2a45f79c748484820fe6b9822f2930f1aea302732" exitCode=0 Nov 29 04:33:17 crc kubenswrapper[4631]: I1129 04:33:17.469514 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" event={"ID":"108ade1d-ac82-4183-99eb-761b54886da9","Type":"ContainerDied","Data":"a5fbcc4c5d1d2bfb9e698ae2a45f79c748484820fe6b9822f2930f1aea302732"} Nov 29 04:33:18 crc kubenswrapper[4631]: I1129 04:33:18.040101 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" podUID="108ade1d-ac82-4183-99eb-761b54886da9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.164:5353: connect: connection refused" Nov 29 04:33:21 crc kubenswrapper[4631]: I1129 04:33:21.518985 4631 generic.go:334] "Generic (PLEG): container finished" podID="7da671c0-0a91-41f8-9c7c-a128b5f080d4" containerID="aa6e96e21d1993f9690106968d034fd343e937c4e57d7712794aec9ed5083386" exitCode=0 Nov 29 04:33:21 crc kubenswrapper[4631]: I1129 04:33:21.519176 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4vzq5" event={"ID":"7da671c0-0a91-41f8-9c7c-a128b5f080d4","Type":"ContainerDied","Data":"aa6e96e21d1993f9690106968d034fd343e937c4e57d7712794aec9ed5083386"} Nov 29 04:33:22 crc kubenswrapper[4631]: E1129 04:33:22.156417 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified" Nov 29 04:33:22 crc kubenswrapper[4631]: E1129 04:33:22.157185 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nova-cell1-novncproxy-novncproxy,Image:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f4h7chf8h66ch58dh5d5hbbhf5hddh545h5dch79h5ddh587h5dfhdch64chdh564h584h88h548h577hf9hfbh664hc9hc7hd7h66dh649h579q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/kolla/config_files/config.json,SubPath:nova-novncproxy-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6wxxc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/vnc_lite.html,Port:{0 6080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:10,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/vnc_lite.html,Port:{0 6080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42436,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/vnc_lite.html,Port:{0 6080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:6,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-cell1-novncproxy-0_openstack(293c4669-4e62-4437-9076-b24ce3b5bc02): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 29 04:33:22 crc kubenswrapper[4631]: E1129 04:33:22.159519 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell1-novncproxy-novncproxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/nova-cell1-novncproxy-0" podUID="293c4669-4e62-4437-9076-b24ce3b5bc02" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.290182 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.397566 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-nb\") pod \"108ade1d-ac82-4183-99eb-761b54886da9\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.397641 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-swift-storage-0\") pod \"108ade1d-ac82-4183-99eb-761b54886da9\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.397870 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-sb\") pod \"108ade1d-ac82-4183-99eb-761b54886da9\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.397899 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-svc\") pod \"108ade1d-ac82-4183-99eb-761b54886da9\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.397936 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gncpn\" (UniqueName: \"kubernetes.io/projected/108ade1d-ac82-4183-99eb-761b54886da9-kube-api-access-gncpn\") pod \"108ade1d-ac82-4183-99eb-761b54886da9\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.397984 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-config\") pod \"108ade1d-ac82-4183-99eb-761b54886da9\" (UID: \"108ade1d-ac82-4183-99eb-761b54886da9\") " Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.408356 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/108ade1d-ac82-4183-99eb-761b54886da9-kube-api-access-gncpn" (OuterVolumeSpecName: "kube-api-access-gncpn") pod "108ade1d-ac82-4183-99eb-761b54886da9" (UID: "108ade1d-ac82-4183-99eb-761b54886da9"). InnerVolumeSpecName "kube-api-access-gncpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.465783 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-config" (OuterVolumeSpecName: "config") pod "108ade1d-ac82-4183-99eb-761b54886da9" (UID: "108ade1d-ac82-4183-99eb-761b54886da9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.469020 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "108ade1d-ac82-4183-99eb-761b54886da9" (UID: "108ade1d-ac82-4183-99eb-761b54886da9"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.470941 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "108ade1d-ac82-4183-99eb-761b54886da9" (UID: "108ade1d-ac82-4183-99eb-761b54886da9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.477709 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "108ade1d-ac82-4183-99eb-761b54886da9" (UID: "108ade1d-ac82-4183-99eb-761b54886da9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.493855 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "108ade1d-ac82-4183-99eb-761b54886da9" (UID: "108ade1d-ac82-4183-99eb-761b54886da9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.501806 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.501836 4631 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.501849 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.501861 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.501872 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gncpn\" (UniqueName: \"kubernetes.io/projected/108ade1d-ac82-4183-99eb-761b54886da9-kube-api-access-gncpn\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.501885 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/108ade1d-ac82-4183-99eb-761b54886da9-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.530580 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.531214 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-hhxk2" event={"ID":"108ade1d-ac82-4183-99eb-761b54886da9","Type":"ContainerDied","Data":"070e7fac880aa8741df152b03cdb1c69206949170b1aebf90f70213e87295718"} Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.531273 4631 scope.go:117] "RemoveContainer" containerID="a5fbcc4c5d1d2bfb9e698ae2a45f79c748484820fe6b9822f2930f1aea302732" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.603170 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-hhxk2"] Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.629629 4631 scope.go:117] "RemoveContainer" containerID="1a5362cd150902794571cc7f4ea2f956239a31696486791db9726e59a3d22aec" Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.665286 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-hhxk2"] Nov 29 04:33:22 crc kubenswrapper[4631]: I1129 04:33:22.819949 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.143956 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.214291 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-scripts\") pod \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.214362 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-combined-ca-bundle\") pod \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.214483 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-config-data\") pod \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.214546 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6q84\" (UniqueName: \"kubernetes.io/projected/7da671c0-0a91-41f8-9c7c-a128b5f080d4-kube-api-access-p6q84\") pod \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\" (UID: \"7da671c0-0a91-41f8-9c7c-a128b5f080d4\") " Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.219318 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7da671c0-0a91-41f8-9c7c-a128b5f080d4-kube-api-access-p6q84" (OuterVolumeSpecName: "kube-api-access-p6q84") pod "7da671c0-0a91-41f8-9c7c-a128b5f080d4" (UID: "7da671c0-0a91-41f8-9c7c-a128b5f080d4"). InnerVolumeSpecName "kube-api-access-p6q84". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.233711 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.236697 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-scripts" (OuterVolumeSpecName: "scripts") pod "7da671c0-0a91-41f8-9c7c-a128b5f080d4" (UID: "7da671c0-0a91-41f8-9c7c-a128b5f080d4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.244211 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="108ade1d-ac82-4183-99eb-761b54886da9" path="/var/lib/kubelet/pods/108ade1d-ac82-4183-99eb-761b54886da9/volumes" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.275369 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-config-data" (OuterVolumeSpecName: "config-data") pod "7da671c0-0a91-41f8-9c7c-a128b5f080d4" (UID: "7da671c0-0a91-41f8-9c7c-a128b5f080d4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.314661 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7da671c0-0a91-41f8-9c7c-a128b5f080d4" (UID: "7da671c0-0a91-41f8-9c7c-a128b5f080d4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.316011 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-config-data\") pod \"293c4669-4e62-4437-9076-b24ce3b5bc02\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.316166 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-combined-ca-bundle\") pod \"293c4669-4e62-4437-9076-b24ce3b5bc02\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.316443 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wxxc\" (UniqueName: \"kubernetes.io/projected/293c4669-4e62-4437-9076-b24ce3b5bc02-kube-api-access-6wxxc\") pod \"293c4669-4e62-4437-9076-b24ce3b5bc02\" (UID: \"293c4669-4e62-4437-9076-b24ce3b5bc02\") " Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.317821 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6q84\" (UniqueName: \"kubernetes.io/projected/7da671c0-0a91-41f8-9c7c-a128b5f080d4-kube-api-access-p6q84\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.318110 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.318488 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.318607 4631 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7da671c0-0a91-41f8-9c7c-a128b5f080d4-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.322146 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/293c4669-4e62-4437-9076-b24ce3b5bc02-kube-api-access-6wxxc" (OuterVolumeSpecName: "kube-api-access-6wxxc") pod "293c4669-4e62-4437-9076-b24ce3b5bc02" (UID: "293c4669-4e62-4437-9076-b24ce3b5bc02"). InnerVolumeSpecName "kube-api-access-6wxxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.322238 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "293c4669-4e62-4437-9076-b24ce3b5bc02" (UID: "293c4669-4e62-4437-9076-b24ce3b5bc02"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.323986 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-config-data" (OuterVolumeSpecName: "config-data") pod "293c4669-4e62-4437-9076-b24ce3b5bc02" (UID: "293c4669-4e62-4437-9076-b24ce3b5bc02"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.420430 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.420476 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/293c4669-4e62-4437-9076-b24ce3b5bc02-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.420700 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wxxc\" (UniqueName: \"kubernetes.io/projected/293c4669-4e62-4437-9076-b24ce3b5bc02-kube-api-access-6wxxc\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.560863 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"293c4669-4e62-4437-9076-b24ce3b5bc02","Type":"ContainerDied","Data":"95e82f8948f1e493620eff7e5ee5061b4608caf30fadd9560cc4f874e0da2148"} Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.561022 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.575480 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"674320ff-f1bc-464e-b962-1118dad1c0e4","Type":"ContainerStarted","Data":"e3b5023315514c2f9d0a481917d092c108b22b3df55cf757ac474684c7f1d9cb"} Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.578808 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ba30fb10-ed90-4538-ae0d-d041d9e74dcd","Type":"ContainerStarted","Data":"ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c"} Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.579013 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-4vzq5" event={"ID":"7da671c0-0a91-41f8-9c7c-a128b5f080d4","Type":"ContainerDied","Data":"fb0c9913a9213ee541ecced8e0af59d1895588ee70fd213ee0a94701842612e7"} Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.579043 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-4vzq5" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.579056 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb0c9913a9213ee541ecced8e0af59d1895588ee70fd213ee0a94701842612e7" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.593225 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485ad445-4582-4933-9d89-ff9457805da0","Type":"ContainerStarted","Data":"928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56"} Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.597397 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.415101452 podStartE2EDuration="18.597378788s" podCreationTimestamp="2025-11-29 04:33:05 +0000 UTC" firstStartedPulling="2025-11-29 04:33:07.890675127 +0000 UTC m=+1314.955178641" lastFinishedPulling="2025-11-29 04:33:23.072952463 +0000 UTC m=+1330.137455977" observedRunningTime="2025-11-29 04:33:23.592602691 +0000 UTC m=+1330.657106205" watchObservedRunningTime="2025-11-29 04:33:23.597378788 +0000 UTC m=+1330.661882302" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.717023 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.726071 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.734413 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 04:33:23 crc kubenswrapper[4631]: E1129 04:33:23.734807 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7da671c0-0a91-41f8-9c7c-a128b5f080d4" containerName="nova-manage" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.734823 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="7da671c0-0a91-41f8-9c7c-a128b5f080d4" containerName="nova-manage" Nov 29 04:33:23 crc kubenswrapper[4631]: E1129 04:33:23.734848 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="108ade1d-ac82-4183-99eb-761b54886da9" containerName="dnsmasq-dns" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.734855 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="108ade1d-ac82-4183-99eb-761b54886da9" 
containerName="dnsmasq-dns" Nov 29 04:33:23 crc kubenswrapper[4631]: E1129 04:33:23.734871 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="108ade1d-ac82-4183-99eb-761b54886da9" containerName="init" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.734877 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="108ade1d-ac82-4183-99eb-761b54886da9" containerName="init" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.735037 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="108ade1d-ac82-4183-99eb-761b54886da9" containerName="dnsmasq-dns" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.735050 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="7da671c0-0a91-41f8-9c7c-a128b5f080d4" containerName="nova-manage" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.736955 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.741509 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.744421 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.745826 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.746905 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.811680 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.826795 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.838094 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74fht\" (UniqueName: \"kubernetes.io/projected/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-kube-api-access-74fht\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.838237 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.838358 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.838485 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.838602 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.939978 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.940235 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74fht\" (UniqueName: \"kubernetes.io/projected/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-kube-api-access-74fht\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.940355 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.940440 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.940543 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.946016 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.946066 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.949793 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc 
kubenswrapper[4631]: I1129 04:33:23.953765 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:23 crc kubenswrapper[4631]: I1129 04:33:23.961978 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74fht\" (UniqueName: \"kubernetes.io/projected/462ee7da-139d-4cd7-91c2-6bb6b02f9b57-kube-api-access-74fht\") pod \"nova-cell1-novncproxy-0\" (UID: \"462ee7da-139d-4cd7-91c2-6bb6b02f9b57\") " pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.058393 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.530819 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.544901 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.628124 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485ad445-4582-4933-9d89-ff9457805da0","Type":"ContainerStarted","Data":"6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347"} Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.628301 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="485ad445-4582-4933-9d89-ff9457805da0" containerName="nova-metadata-log" containerID="cri-o://928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56" gracePeriod=30 Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.628510 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="485ad445-4582-4933-9d89-ff9457805da0" containerName="nova-metadata-metadata" containerID="cri-o://6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347" gracePeriod=30 Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.633696 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"462ee7da-139d-4cd7-91c2-6bb6b02f9b57","Type":"ContainerStarted","Data":"6bfa3907cf4ed81427ba3d2fce945999388ff1f8b8ca0486b4f041b9ab537a70"} Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.647512 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.473046585 podStartE2EDuration="19.647494279s" podCreationTimestamp="2025-11-29 04:33:05 +0000 UTC" firstStartedPulling="2025-11-29 04:33:07.904450594 +0000 UTC m=+1314.968954108" lastFinishedPulling="2025-11-29 04:33:23.078898278 +0000 UTC m=+1330.143401802" observedRunningTime="2025-11-29 04:33:24.647097749 +0000 UTC m=+1331.711601283" watchObservedRunningTime="2025-11-29 04:33:24.647494279 +0000 UTC m=+1331.711997793" Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.655345 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"674320ff-f1bc-464e-b962-1118dad1c0e4","Type":"ContainerStarted","Data":"cfa6c4e8f4142400713f0665695bb06f1525d59a196090a22b6eed55e283b960"} Nov 29 04:33:24 crc kubenswrapper[4631]: I1129 04:33:24.687265 4631 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.559010738 podStartE2EDuration="19.687246671s" podCreationTimestamp="2025-11-29 04:33:05 +0000 UTC" firstStartedPulling="2025-11-29 04:33:07.943840408 +0000 UTC m=+1315.008343922" lastFinishedPulling="2025-11-29 04:33:23.072076341 +0000 UTC m=+1330.136579855" observedRunningTime="2025-11-29 04:33:24.672862049 +0000 UTC m=+1331.737365563" watchObservedRunningTime="2025-11-29 04:33:24.687246671 +0000 UTC m=+1331.751750185" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.158269 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.236492 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="293c4669-4e62-4437-9076-b24ce3b5bc02" path="/var/lib/kubelet/pods/293c4669-4e62-4437-9076-b24ce3b5bc02/volumes" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.272634 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-config-data\") pod \"485ad445-4582-4933-9d89-ff9457805da0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.272904 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485ad445-4582-4933-9d89-ff9457805da0-logs\") pod \"485ad445-4582-4933-9d89-ff9457805da0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.273171 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-combined-ca-bundle\") pod \"485ad445-4582-4933-9d89-ff9457805da0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.273454 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/485ad445-4582-4933-9d89-ff9457805da0-logs" (OuterVolumeSpecName: "logs") pod "485ad445-4582-4933-9d89-ff9457805da0" (UID: "485ad445-4582-4933-9d89-ff9457805da0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.273867 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rn5s\" (UniqueName: \"kubernetes.io/projected/485ad445-4582-4933-9d89-ff9457805da0-kube-api-access-9rn5s\") pod \"485ad445-4582-4933-9d89-ff9457805da0\" (UID: \"485ad445-4582-4933-9d89-ff9457805da0\") " Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.275011 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485ad445-4582-4933-9d89-ff9457805da0-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.281633 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/485ad445-4582-4933-9d89-ff9457805da0-kube-api-access-9rn5s" (OuterVolumeSpecName: "kube-api-access-9rn5s") pod "485ad445-4582-4933-9d89-ff9457805da0" (UID: "485ad445-4582-4933-9d89-ff9457805da0"). InnerVolumeSpecName "kube-api-access-9rn5s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.325472 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-config-data" (OuterVolumeSpecName: "config-data") pod "485ad445-4582-4933-9d89-ff9457805da0" (UID: "485ad445-4582-4933-9d89-ff9457805da0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.325566 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "485ad445-4582-4933-9d89-ff9457805da0" (UID: "485ad445-4582-4933-9d89-ff9457805da0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.377284 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.377315 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rn5s\" (UniqueName: \"kubernetes.io/projected/485ad445-4582-4933-9d89-ff9457805da0-kube-api-access-9rn5s\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.377367 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485ad445-4582-4933-9d89-ff9457805da0-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.661195 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"462ee7da-139d-4cd7-91c2-6bb6b02f9b57","Type":"ContainerStarted","Data":"2c000def651b5a4d082c153b1794ab5b6fc7c48c5dd1d622c20238ecfa2c286a"} Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.667689 4631 generic.go:334] "Generic (PLEG): container finished" podID="485ad445-4582-4933-9d89-ff9457805da0" containerID="6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347" exitCode=0 Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.667722 4631 generic.go:334] "Generic (PLEG): container finished" podID="485ad445-4582-4933-9d89-ff9457805da0" containerID="928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56" exitCode=143 Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.667883 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerName="nova-api-log" containerID="cri-o://e3b5023315514c2f9d0a481917d092c108b22b3df55cf757ac474684c7f1d9cb" gracePeriod=30 Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.668186 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.668273 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485ad445-4582-4933-9d89-ff9457805da0","Type":"ContainerDied","Data":"6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347"} Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.668309 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485ad445-4582-4933-9d89-ff9457805da0","Type":"ContainerDied","Data":"928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56"} Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.668322 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485ad445-4582-4933-9d89-ff9457805da0","Type":"ContainerDied","Data":"049dae8be79d2e5fae437c65093d6b3c7c0a633420478d96b0ab8c52e71a62c6"} Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.668353 4631 scope.go:117] "RemoveContainer" containerID="6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.668538 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="ba30fb10-ed90-4538-ae0d-d041d9e74dcd" containerName="nova-scheduler-scheduler" containerID="cri-o://ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c" gracePeriod=30 Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.668624 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerName="nova-api-api" containerID="cri-o://cfa6c4e8f4142400713f0665695bb06f1525d59a196090a22b6eed55e283b960" gracePeriod=30 Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.687231 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.230995237 podStartE2EDuration="2.687218354s" podCreationTimestamp="2025-11-29 04:33:23 +0000 UTC" firstStartedPulling="2025-11-29 04:33:24.544711785 +0000 UTC m=+1331.609215299" lastFinishedPulling="2025-11-29 04:33:25.000934892 +0000 UTC m=+1332.065438416" observedRunningTime="2025-11-29 04:33:25.686757013 +0000 UTC m=+1332.751260527" watchObservedRunningTime="2025-11-29 04:33:25.687218354 +0000 UTC m=+1332.751721868" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.701378 4631 scope.go:117] "RemoveContainer" containerID="928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.714380 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.728051 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.736257 4631 scope.go:117] "RemoveContainer" containerID="6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347" Nov 29 04:33:25 crc kubenswrapper[4631]: E1129 04:33:25.738644 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347\": container with ID starting with 6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347 not found: ID does not exist" 
containerID="6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.738680 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347"} err="failed to get container status \"6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347\": rpc error: code = NotFound desc = could not find container \"6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347\": container with ID starting with 6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347 not found: ID does not exist" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.738701 4631 scope.go:117] "RemoveContainer" containerID="928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56" Nov 29 04:33:25 crc kubenswrapper[4631]: E1129 04:33:25.741872 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56\": container with ID starting with 928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56 not found: ID does not exist" containerID="928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.741903 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56"} err="failed to get container status \"928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56\": rpc error: code = NotFound desc = could not find container \"928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56\": container with ID starting with 928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56 not found: ID does not exist" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.741922 4631 scope.go:117] "RemoveContainer" containerID="6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.741981 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:25 crc kubenswrapper[4631]: E1129 04:33:25.742324 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="485ad445-4582-4933-9d89-ff9457805da0" containerName="nova-metadata-metadata" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.742362 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="485ad445-4582-4933-9d89-ff9457805da0" containerName="nova-metadata-metadata" Nov 29 04:33:25 crc kubenswrapper[4631]: E1129 04:33:25.742400 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="485ad445-4582-4933-9d89-ff9457805da0" containerName="nova-metadata-log" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.742407 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="485ad445-4582-4933-9d89-ff9457805da0" containerName="nova-metadata-log" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.742583 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="485ad445-4582-4933-9d89-ff9457805da0" containerName="nova-metadata-log" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.742597 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="485ad445-4582-4933-9d89-ff9457805da0" containerName="nova-metadata-metadata" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.743471 4631 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.744249 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347"} err="failed to get container status \"6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347\": rpc error: code = NotFound desc = could not find container \"6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347\": container with ID starting with 6e45064b756bb30e9f770b44ce2d62621fadff40c1c7e22cf693403e4faea347 not found: ID does not exist" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.744270 4631 scope.go:117] "RemoveContainer" containerID="928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.745960 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.746150 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56"} err="failed to get container status \"928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56\": rpc error: code = NotFound desc = could not find container \"928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56\": container with ID starting with 928d56bf7dcf0c33aa97f531fc60d02a976530c450ebb0ea7388643b2a307e56 not found: ID does not exist" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.746219 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.759252 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.898348 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-config-data\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.898456 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae1fc503-ebc1-4261-b4b8-ee167b101e08-logs\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.898478 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.898504 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:25 crc kubenswrapper[4631]: I1129 04:33:25.898537 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkbtj\" (UniqueName: \"kubernetes.io/projected/ae1fc503-ebc1-4261-b4b8-ee167b101e08-kube-api-access-wkbtj\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:25.999939 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-config-data\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.000028 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae1fc503-ebc1-4261-b4b8-ee167b101e08-logs\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.000047 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.000071 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.000104 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkbtj\" (UniqueName: \"kubernetes.io/projected/ae1fc503-ebc1-4261-b4b8-ee167b101e08-kube-api-access-wkbtj\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.000465 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae1fc503-ebc1-4261-b4b8-ee167b101e08-logs\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.005940 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.006618 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.016436 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.021683 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-config-data\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.039877 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkbtj\" (UniqueName: \"kubernetes.io/projected/ae1fc503-ebc1-4261-b4b8-ee167b101e08-kube-api-access-wkbtj\") pod \"nova-metadata-0\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.066817 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.516819 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.677514 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae1fc503-ebc1-4261-b4b8-ee167b101e08","Type":"ContainerStarted","Data":"70118047aecb1a3458ccbcae466b93ba1c3f6acf5c67359ab81e890f6c3e9cca"} Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.679982 4631 generic.go:334] "Generic (PLEG): container finished" podID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerID="cfa6c4e8f4142400713f0665695bb06f1525d59a196090a22b6eed55e283b960" exitCode=0 Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.680002 4631 generic.go:334] "Generic (PLEG): container finished" podID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerID="e3b5023315514c2f9d0a481917d092c108b22b3df55cf757ac474684c7f1d9cb" exitCode=143 Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.680063 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"674320ff-f1bc-464e-b962-1118dad1c0e4","Type":"ContainerDied","Data":"cfa6c4e8f4142400713f0665695bb06f1525d59a196090a22b6eed55e283b960"} Nov 29 04:33:26 crc kubenswrapper[4631]: I1129 04:33:26.680108 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"674320ff-f1bc-464e-b962-1118dad1c0e4","Type":"ContainerDied","Data":"e3b5023315514c2f9d0a481917d092c108b22b3df55cf757ac474684c7f1d9cb"} Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.244196 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="485ad445-4582-4933-9d89-ff9457805da0" path="/var/lib/kubelet/pods/485ad445-4582-4933-9d89-ff9457805da0/volumes" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.402295 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.529047 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-combined-ca-bundle\") pod \"674320ff-f1bc-464e-b962-1118dad1c0e4\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.529194 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh2ks\" (UniqueName: \"kubernetes.io/projected/674320ff-f1bc-464e-b962-1118dad1c0e4-kube-api-access-mh2ks\") pod \"674320ff-f1bc-464e-b962-1118dad1c0e4\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.529874 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/674320ff-f1bc-464e-b962-1118dad1c0e4-logs\") pod \"674320ff-f1bc-464e-b962-1118dad1c0e4\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.529903 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-config-data\") pod \"674320ff-f1bc-464e-b962-1118dad1c0e4\" (UID: \"674320ff-f1bc-464e-b962-1118dad1c0e4\") " Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.530104 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/674320ff-f1bc-464e-b962-1118dad1c0e4-logs" (OuterVolumeSpecName: "logs") pod "674320ff-f1bc-464e-b962-1118dad1c0e4" (UID: "674320ff-f1bc-464e-b962-1118dad1c0e4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.530321 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/674320ff-f1bc-464e-b962-1118dad1c0e4-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.534867 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/674320ff-f1bc-464e-b962-1118dad1c0e4-kube-api-access-mh2ks" (OuterVolumeSpecName: "kube-api-access-mh2ks") pod "674320ff-f1bc-464e-b962-1118dad1c0e4" (UID: "674320ff-f1bc-464e-b962-1118dad1c0e4"). InnerVolumeSpecName "kube-api-access-mh2ks". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.561823 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-config-data" (OuterVolumeSpecName: "config-data") pod "674320ff-f1bc-464e-b962-1118dad1c0e4" (UID: "674320ff-f1bc-464e-b962-1118dad1c0e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.564139 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "674320ff-f1bc-464e-b962-1118dad1c0e4" (UID: "674320ff-f1bc-464e-b962-1118dad1c0e4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.631600 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.631630 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/674320ff-f1bc-464e-b962-1118dad1c0e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.631643 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh2ks\" (UniqueName: \"kubernetes.io/projected/674320ff-f1bc-464e-b962-1118dad1c0e4-kube-api-access-mh2ks\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.692714 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"674320ff-f1bc-464e-b962-1118dad1c0e4","Type":"ContainerDied","Data":"8e8a37508fe781fa85baf96bd95b5997b2151b3ac2736d7f832dc6fb374db84c"} Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.692760 4631 scope.go:117] "RemoveContainer" containerID="cfa6c4e8f4142400713f0665695bb06f1525d59a196090a22b6eed55e283b960" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.692888 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.709524 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae1fc503-ebc1-4261-b4b8-ee167b101e08","Type":"ContainerStarted","Data":"202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602"} Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.709565 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae1fc503-ebc1-4261-b4b8-ee167b101e08","Type":"ContainerStarted","Data":"f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59"} Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.758260 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.758461 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="03cdf7d8-fc05-44d0-a4a9-b62239838053" containerName="kube-state-metrics" containerID="cri-o://b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4" gracePeriod=30 Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.770814 4631 scope.go:117] "RemoveContainer" containerID="e3b5023315514c2f9d0a481917d092c108b22b3df55cf757ac474684c7f1d9cb" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.783184 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.800698 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.811098 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.811081484 podStartE2EDuration="2.811081484s" podCreationTimestamp="2025-11-29 04:33:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:27.747046788 +0000 UTC 
m=+1334.811550302" watchObservedRunningTime="2025-11-29 04:33:27.811081484 +0000 UTC m=+1334.875584998" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.822079 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:27 crc kubenswrapper[4631]: E1129 04:33:27.822533 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerName="nova-api-api" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.822550 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerName="nova-api-api" Nov 29 04:33:27 crc kubenswrapper[4631]: E1129 04:33:27.822588 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerName="nova-api-log" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.822595 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerName="nova-api-log" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.822761 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerName="nova-api-log" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.822789 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="674320ff-f1bc-464e-b962-1118dad1c0e4" containerName="nova-api-api" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.823972 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.825895 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.829252 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.845651 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="03cdf7d8-fc05-44d0-a4a9-b62239838053" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": dial tcp 10.217.0.103:8081: connect: connection refused" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.936841 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.936964 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvkbs\" (UniqueName: \"kubernetes.io/projected/e8d69e47-05ab-43fb-b75f-ad96caf08df0-kube-api-access-hvkbs\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.936996 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-config-data\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:27 crc kubenswrapper[4631]: I1129 04:33:27.937078 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8d69e47-05ab-43fb-b75f-ad96caf08df0-logs\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.038927 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.038992 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvkbs\" (UniqueName: \"kubernetes.io/projected/e8d69e47-05ab-43fb-b75f-ad96caf08df0-kube-api-access-hvkbs\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.039022 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-config-data\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.039042 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8d69e47-05ab-43fb-b75f-ad96caf08df0-logs\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.039423 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8d69e47-05ab-43fb-b75f-ad96caf08df0-logs\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.044110 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.053684 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-config-data\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.067552 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvkbs\" (UniqueName: \"kubernetes.io/projected/e8d69e47-05ab-43fb-b75f-ad96caf08df0-kube-api-access-hvkbs\") pod \"nova-api-0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.197491 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.207354 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.354174 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mcw6\" (UniqueName: \"kubernetes.io/projected/03cdf7d8-fc05-44d0-a4a9-b62239838053-kube-api-access-9mcw6\") pod \"03cdf7d8-fc05-44d0-a4a9-b62239838053\" (UID: \"03cdf7d8-fc05-44d0-a4a9-b62239838053\") " Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.694458 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03cdf7d8-fc05-44d0-a4a9-b62239838053-kube-api-access-9mcw6" (OuterVolumeSpecName: "kube-api-access-9mcw6") pod "03cdf7d8-fc05-44d0-a4a9-b62239838053" (UID: "03cdf7d8-fc05-44d0-a4a9-b62239838053"). InnerVolumeSpecName "kube-api-access-9mcw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.735982 4631 generic.go:334] "Generic (PLEG): container finished" podID="03cdf7d8-fc05-44d0-a4a9-b62239838053" containerID="b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4" exitCode=2 Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.736138 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.736993 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"03cdf7d8-fc05-44d0-a4a9-b62239838053","Type":"ContainerDied","Data":"b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4"} Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.737022 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"03cdf7d8-fc05-44d0-a4a9-b62239838053","Type":"ContainerDied","Data":"0401ec8e0ea3dee8b6dd060585b89e95a9c61ac739a4dbe60b24da75aa69ee4b"} Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.737041 4631 scope.go:117] "RemoveContainer" containerID="b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.769233 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mcw6\" (UniqueName: \"kubernetes.io/projected/03cdf7d8-fc05-44d0-a4a9-b62239838053-kube-api-access-9mcw6\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.817874 4631 scope.go:117] "RemoveContainer" containerID="b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4" Nov 29 04:33:28 crc kubenswrapper[4631]: E1129 04:33:28.822939 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4\": container with ID starting with b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4 not found: ID does not exist" containerID="b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.823001 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4"} err="failed to get container status \"b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4\": rpc error: code = NotFound desc = could not find container \"b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4\": container with ID starting with 
b485ae07730d7a6bb70dbe097ae403e8fd5ad00f185b10d6298931cca4f6e7d4 not found: ID does not exist" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.831358 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.848526 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.863200 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 04:33:28 crc kubenswrapper[4631]: E1129 04:33:28.863644 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03cdf7d8-fc05-44d0-a4a9-b62239838053" containerName="kube-state-metrics" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.863657 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="03cdf7d8-fc05-44d0-a4a9-b62239838053" containerName="kube-state-metrics" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.863878 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="03cdf7d8-fc05-44d0-a4a9-b62239838053" containerName="kube-state-metrics" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.864524 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.867024 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.867064 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.870805 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.974255 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.974790 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sctnr\" (UniqueName: \"kubernetes.io/projected/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-kube-api-access-sctnr\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.974962 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:28 crc kubenswrapper[4631]: I1129 04:33:28.975163 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.059767 4631 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.077123 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sctnr\" (UniqueName: \"kubernetes.io/projected/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-kube-api-access-sctnr\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.077201 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.077276 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.077312 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.083928 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.084449 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.086018 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.107761 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sctnr\" (UniqueName: \"kubernetes.io/projected/c4c8b52f-72c8-4aac-9c57-df83ec5dfe20-kube-api-access-sctnr\") pod \"kube-state-metrics-0\" (UID: \"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20\") " pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.182561 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.214299 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.256938 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03cdf7d8-fc05-44d0-a4a9-b62239838053" path="/var/lib/kubelet/pods/03cdf7d8-fc05-44d0-a4a9-b62239838053/volumes" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.258012 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="674320ff-f1bc-464e-b962-1118dad1c0e4" path="/var/lib/kubelet/pods/674320ff-f1bc-464e-b962-1118dad1c0e4/volumes" Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.463937 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 29 04:33:29 crc kubenswrapper[4631]: W1129 04:33:29.468446 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4c8b52f_72c8_4aac_9c57_df83ec5dfe20.slice/crio-dceaf719da1cf858a472c0e918a9f150cef6868f9d8e71c3a55337e4ce94d102 WatchSource:0}: Error finding container dceaf719da1cf858a472c0e918a9f150cef6868f9d8e71c3a55337e4ce94d102: Status 404 returned error can't find the container with id dceaf719da1cf858a472c0e918a9f150cef6868f9d8e71c3a55337e4ce94d102 Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.747716 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.748261 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="ceilometer-central-agent" containerID="cri-o://eb8d793ae7675e1991a30808202976cc94ca73a2ef52e177344c7128b23ff3ec" gracePeriod=30 Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.748272 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="sg-core" containerID="cri-o://60d2797809b903548b97779c909345691dc7d36bce26c2841cc3c08f714b5c7d" gracePeriod=30 Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.748285 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="proxy-httpd" containerID="cri-o://d295df1ad8b80ceb227232d22e1c0e9b4d8bd98ddadd7eabfa9dcd17c3c045c9" gracePeriod=30 Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.748591 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="ceilometer-notification-agent" containerID="cri-o://2d89503d2a3f51fcceb3f6f7b8266ba5f977719bf144c8248bd2ce9f7450df5c" gracePeriod=30 Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.758560 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e8d69e47-05ab-43fb-b75f-ad96caf08df0","Type":"ContainerStarted","Data":"45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea"} Nov 29 04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.758613 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e8d69e47-05ab-43fb-b75f-ad96caf08df0","Type":"ContainerStarted","Data":"332c3dc61578f79392ed8997444ade5b33ce2ee8ac1b01dcbe3db368c3fce405"} Nov 29 
04:33:29 crc kubenswrapper[4631]: I1129 04:33:29.763018 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20","Type":"ContainerStarted","Data":"dceaf719da1cf858a472c0e918a9f150cef6868f9d8e71c3a55337e4ce94d102"} Nov 29 04:33:30 crc kubenswrapper[4631]: I1129 04:33:30.779450 4631 generic.go:334] "Generic (PLEG): container finished" podID="f7b82554-0254-4b14-8983-e70c42a48315" containerID="d295df1ad8b80ceb227232d22e1c0e9b4d8bd98ddadd7eabfa9dcd17c3c045c9" exitCode=0 Nov 29 04:33:30 crc kubenswrapper[4631]: I1129 04:33:30.779754 4631 generic.go:334] "Generic (PLEG): container finished" podID="f7b82554-0254-4b14-8983-e70c42a48315" containerID="60d2797809b903548b97779c909345691dc7d36bce26c2841cc3c08f714b5c7d" exitCode=2 Nov 29 04:33:30 crc kubenswrapper[4631]: I1129 04:33:30.779770 4631 generic.go:334] "Generic (PLEG): container finished" podID="f7b82554-0254-4b14-8983-e70c42a48315" containerID="eb8d793ae7675e1991a30808202976cc94ca73a2ef52e177344c7128b23ff3ec" exitCode=0 Nov 29 04:33:30 crc kubenswrapper[4631]: I1129 04:33:30.779520 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerDied","Data":"d295df1ad8b80ceb227232d22e1c0e9b4d8bd98ddadd7eabfa9dcd17c3c045c9"} Nov 29 04:33:30 crc kubenswrapper[4631]: I1129 04:33:30.779845 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerDied","Data":"60d2797809b903548b97779c909345691dc7d36bce26c2841cc3c08f714b5c7d"} Nov 29 04:33:30 crc kubenswrapper[4631]: I1129 04:33:30.779865 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerDied","Data":"eb8d793ae7675e1991a30808202976cc94ca73a2ef52e177344c7128b23ff3ec"} Nov 29 04:33:30 crc kubenswrapper[4631]: I1129 04:33:30.784602 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e8d69e47-05ab-43fb-b75f-ad96caf08df0","Type":"ContainerStarted","Data":"2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e"} Nov 29 04:33:30 crc kubenswrapper[4631]: I1129 04:33:30.816022 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.81600179 podStartE2EDuration="3.81600179s" podCreationTimestamp="2025-11-29 04:33:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:30.815483437 +0000 UTC m=+1337.879987011" watchObservedRunningTime="2025-11-29 04:33:30.81600179 +0000 UTC m=+1337.880505314" Nov 29 04:33:31 crc kubenswrapper[4631]: I1129 04:33:31.068216 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 29 04:33:31 crc kubenswrapper[4631]: I1129 04:33:31.068504 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 29 04:33:32 crc kubenswrapper[4631]: I1129 04:33:32.809890 4631 generic.go:334] "Generic (PLEG): container finished" podID="f7b82554-0254-4b14-8983-e70c42a48315" containerID="2d89503d2a3f51fcceb3f6f7b8266ba5f977719bf144c8248bd2ce9f7450df5c" exitCode=0 Nov 29 04:33:32 crc kubenswrapper[4631]: I1129 04:33:32.809949 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerDied","Data":"2d89503d2a3f51fcceb3f6f7b8266ba5f977719bf144c8248bd2ce9f7450df5c"} Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.696575 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.779614 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-log-httpd\") pod \"f7b82554-0254-4b14-8983-e70c42a48315\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.779721 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-scripts\") pod \"f7b82554-0254-4b14-8983-e70c42a48315\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.779787 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-config-data\") pod \"f7b82554-0254-4b14-8983-e70c42a48315\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.779827 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngl2f\" (UniqueName: \"kubernetes.io/projected/f7b82554-0254-4b14-8983-e70c42a48315-kube-api-access-ngl2f\") pod \"f7b82554-0254-4b14-8983-e70c42a48315\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.779855 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-run-httpd\") pod \"f7b82554-0254-4b14-8983-e70c42a48315\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.779888 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-combined-ca-bundle\") pod \"f7b82554-0254-4b14-8983-e70c42a48315\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.779944 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-sg-core-conf-yaml\") pod \"f7b82554-0254-4b14-8983-e70c42a48315\" (UID: \"f7b82554-0254-4b14-8983-e70c42a48315\") " Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.780049 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f7b82554-0254-4b14-8983-e70c42a48315" (UID: "f7b82554-0254-4b14-8983-e70c42a48315"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.780379 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f7b82554-0254-4b14-8983-e70c42a48315" (UID: "f7b82554-0254-4b14-8983-e70c42a48315"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.780925 4631 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.780942 4631 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7b82554-0254-4b14-8983-e70c42a48315-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.786861 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b82554-0254-4b14-8983-e70c42a48315-kube-api-access-ngl2f" (OuterVolumeSpecName: "kube-api-access-ngl2f") pod "f7b82554-0254-4b14-8983-e70c42a48315" (UID: "f7b82554-0254-4b14-8983-e70c42a48315"). InnerVolumeSpecName "kube-api-access-ngl2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.787232 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-scripts" (OuterVolumeSpecName: "scripts") pod "f7b82554-0254-4b14-8983-e70c42a48315" (UID: "f7b82554-0254-4b14-8983-e70c42a48315"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.821918 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.822123 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7b82554-0254-4b14-8983-e70c42a48315","Type":"ContainerDied","Data":"425b2264648533413221a1781856dbd45278d72e268a38d0fae89f0873cb36bf"} Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.822191 4631 scope.go:117] "RemoveContainer" containerID="d295df1ad8b80ceb227232d22e1c0e9b4d8bd98ddadd7eabfa9dcd17c3c045c9" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.823437 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c4c8b52f-72c8-4aac-9c57-df83ec5dfe20","Type":"ContainerStarted","Data":"fc542c05e9626d18dadb21870c7145df94d7ff94b63d947eeefe9b4e60a02444"} Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.825018 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.825969 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f7b82554-0254-4b14-8983-e70c42a48315" (UID: "f7b82554-0254-4b14-8983-e70c42a48315"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.845457 4631 scope.go:117] "RemoveContainer" containerID="60d2797809b903548b97779c909345691dc7d36bce26c2841cc3c08f714b5c7d" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.854700 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.05543658 podStartE2EDuration="5.854683901s" podCreationTimestamp="2025-11-29 04:33:28 +0000 UTC" firstStartedPulling="2025-11-29 04:33:29.47070135 +0000 UTC m=+1336.535204864" lastFinishedPulling="2025-11-29 04:33:33.269948671 +0000 UTC m=+1340.334452185" observedRunningTime="2025-11-29 04:33:33.841480638 +0000 UTC m=+1340.905984182" watchObservedRunningTime="2025-11-29 04:33:33.854683901 +0000 UTC m=+1340.919187415" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.866082 4631 scope.go:117] "RemoveContainer" containerID="2d89503d2a3f51fcceb3f6f7b8266ba5f977719bf144c8248bd2ce9f7450df5c" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.869654 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f7b82554-0254-4b14-8983-e70c42a48315" (UID: "f7b82554-0254-4b14-8983-e70c42a48315"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.882896 4631 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.882944 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.882954 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngl2f\" (UniqueName: \"kubernetes.io/projected/f7b82554-0254-4b14-8983-e70c42a48315-kube-api-access-ngl2f\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.882963 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.885450 4631 scope.go:117] "RemoveContainer" containerID="eb8d793ae7675e1991a30808202976cc94ca73a2ef52e177344c7128b23ff3ec" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.886709 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-config-data" (OuterVolumeSpecName: "config-data") pod "f7b82554-0254-4b14-8983-e70c42a48315" (UID: "f7b82554-0254-4b14-8983-e70c42a48315"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:33 crc kubenswrapper[4631]: I1129 04:33:33.985779 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b82554-0254-4b14-8983-e70c42a48315-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.059674 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.075971 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.152595 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.161086 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.175414 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:34 crc kubenswrapper[4631]: E1129 04:33:34.175768 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="proxy-httpd" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.175783 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="proxy-httpd" Nov 29 04:33:34 crc kubenswrapper[4631]: E1129 04:33:34.175795 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="ceilometer-central-agent" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.175805 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="ceilometer-central-agent" Nov 29 04:33:34 crc kubenswrapper[4631]: E1129 04:33:34.175842 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="sg-core" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.175848 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="sg-core" Nov 29 04:33:34 crc kubenswrapper[4631]: E1129 04:33:34.175856 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="ceilometer-notification-agent" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.175861 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="ceilometer-notification-agent" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.176041 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="ceilometer-notification-agent" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.176064 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="proxy-httpd" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.176071 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="sg-core" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.176079 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b82554-0254-4b14-8983-e70c42a48315" containerName="ceilometer-central-agent" Nov 29 
04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.177995 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.179780 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.182642 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.184604 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.189349 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.293723 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.293764 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-run-httpd\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.293811 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.293882 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-log-httpd\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.294142 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-scripts\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.294210 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-config-data\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.294233 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2zts\" (UniqueName: \"kubernetes.io/projected/f8b6c6f8-9441-4508-a617-d22255a1e075-kube-api-access-m2zts\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.294357 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.396771 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.397175 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-log-httpd\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.397481 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-scripts\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.397677 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-config-data\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.397860 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2zts\" (UniqueName: \"kubernetes.io/projected/f8b6c6f8-9441-4508-a617-d22255a1e075-kube-api-access-m2zts\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.397696 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-log-httpd\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.398072 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.398425 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.398607 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-run-httpd\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.398880 4631 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-run-httpd\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.401896 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-config-data\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.401997 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-scripts\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.402609 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.404509 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.419989 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2zts\" (UniqueName: \"kubernetes.io/projected/f8b6c6f8-9441-4508-a617-d22255a1e075-kube-api-access-m2zts\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.421700 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.497032 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.864379 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 29 04:33:34 crc kubenswrapper[4631]: I1129 04:33:34.969286 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:34 crc kubenswrapper[4631]: W1129 04:33:34.977436 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8b6c6f8_9441_4508_a617_d22255a1e075.slice/crio-e21d78cf4b53c5693d967f95887407ac2d7bac9c039669fb0c9f179b756318bf WatchSource:0}: Error finding container e21d78cf4b53c5693d967f95887407ac2d7bac9c039669fb0c9f179b756318bf: Status 404 returned error can't find the container with id e21d78cf4b53c5693d967f95887407ac2d7bac9c039669fb0c9f179b756318bf Nov 29 04:33:35 crc kubenswrapper[4631]: I1129 04:33:35.227478 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7b82554-0254-4b14-8983-e70c42a48315" path="/var/lib/kubelet/pods/f7b82554-0254-4b14-8983-e70c42a48315/volumes" Nov 29 04:33:35 crc kubenswrapper[4631]: I1129 04:33:35.842209 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerStarted","Data":"3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea"} Nov 29 04:33:35 crc kubenswrapper[4631]: I1129 04:33:35.842463 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerStarted","Data":"e21d78cf4b53c5693d967f95887407ac2d7bac9c039669fb0c9f179b756318bf"} Nov 29 04:33:36 crc kubenswrapper[4631]: I1129 04:33:36.068252 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 29 04:33:36 crc kubenswrapper[4631]: I1129 04:33:36.068315 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 29 04:33:36 crc kubenswrapper[4631]: I1129 04:33:36.853580 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerStarted","Data":"f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082"} Nov 29 04:33:36 crc kubenswrapper[4631]: I1129 04:33:36.855256 4631 generic.go:334] "Generic (PLEG): container finished" podID="b7e732a2-0a21-4bd8-af75-1ac34236fa2d" containerID="9358c3a7d53991a3e5272bf14397fb1d5550a01dd56b7b2b7cfc9f7a881b0bfe" exitCode=0 Nov 29 04:33:36 crc kubenswrapper[4631]: I1129 04:33:36.855290 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9mvkm" event={"ID":"b7e732a2-0a21-4bd8-af75-1ac34236fa2d","Type":"ContainerDied","Data":"9358c3a7d53991a3e5272bf14397fb1d5550a01dd56b7b2b7cfc9f7a881b0bfe"} Nov 29 04:33:37 crc kubenswrapper[4631]: I1129 04:33:37.081737 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:33:37 crc kubenswrapper[4631]: I1129 04:33:37.081772 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" 
podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.208030 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.208287 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.350491 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.485148 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpclm\" (UniqueName: \"kubernetes.io/projected/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-kube-api-access-dpclm\") pod \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.485236 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-scripts\") pod \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.485293 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-combined-ca-bundle\") pod \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.485356 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-config-data\") pod \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\" (UID: \"b7e732a2-0a21-4bd8-af75-1ac34236fa2d\") " Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.501452 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-scripts" (OuterVolumeSpecName: "scripts") pod "b7e732a2-0a21-4bd8-af75-1ac34236fa2d" (UID: "b7e732a2-0a21-4bd8-af75-1ac34236fa2d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.501662 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-kube-api-access-dpclm" (OuterVolumeSpecName: "kube-api-access-dpclm") pod "b7e732a2-0a21-4bd8-af75-1ac34236fa2d" (UID: "b7e732a2-0a21-4bd8-af75-1ac34236fa2d"). InnerVolumeSpecName "kube-api-access-dpclm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.522746 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-config-data" (OuterVolumeSpecName: "config-data") pod "b7e732a2-0a21-4bd8-af75-1ac34236fa2d" (UID: "b7e732a2-0a21-4bd8-af75-1ac34236fa2d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.529418 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b7e732a2-0a21-4bd8-af75-1ac34236fa2d" (UID: "b7e732a2-0a21-4bd8-af75-1ac34236fa2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.587592 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.587630 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.587645 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.587657 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpclm\" (UniqueName: \"kubernetes.io/projected/b7e732a2-0a21-4bd8-af75-1ac34236fa2d-kube-api-access-dpclm\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.877499 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerStarted","Data":"729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664"} Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.879170 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9mvkm" event={"ID":"b7e732a2-0a21-4bd8-af75-1ac34236fa2d","Type":"ContainerDied","Data":"8decc75382a662293f9832d62c4857457fe0befbc2f7aeac9aa07fbe23f6af56"} Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.879210 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8decc75382a662293f9832d62c4857457fe0befbc2f7aeac9aa07fbe23f6af56" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.879267 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9mvkm" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.982787 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 29 04:33:38 crc kubenswrapper[4631]: E1129 04:33:38.983137 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7e732a2-0a21-4bd8-af75-1ac34236fa2d" containerName="nova-cell1-conductor-db-sync" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.983153 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7e732a2-0a21-4bd8-af75-1ac34236fa2d" containerName="nova-cell1-conductor-db-sync" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.983459 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7e732a2-0a21-4bd8-af75-1ac34236fa2d" containerName="nova-cell1-conductor-db-sync" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.984018 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:38 crc kubenswrapper[4631]: I1129 04:33:38.988178 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.011952 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.097913 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b334d5ab-4b3b-436f-bf43-54c2a6a511b0-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b334d5ab-4b3b-436f-bf43-54c2a6a511b0\") " pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.098026 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b334d5ab-4b3b-436f-bf43-54c2a6a511b0-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b334d5ab-4b3b-436f-bf43-54c2a6a511b0\") " pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.098126 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxvwr\" (UniqueName: \"kubernetes.io/projected/b334d5ab-4b3b-436f-bf43-54c2a6a511b0-kube-api-access-zxvwr\") pod \"nova-cell1-conductor-0\" (UID: \"b334d5ab-4b3b-436f-bf43-54c2a6a511b0\") " pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.192245 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.200186 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b334d5ab-4b3b-436f-bf43-54c2a6a511b0-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b334d5ab-4b3b-436f-bf43-54c2a6a511b0\") " pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.200314 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxvwr\" (UniqueName: \"kubernetes.io/projected/b334d5ab-4b3b-436f-bf43-54c2a6a511b0-kube-api-access-zxvwr\") pod \"nova-cell1-conductor-0\" (UID: \"b334d5ab-4b3b-436f-bf43-54c2a6a511b0\") " pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.200388 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b334d5ab-4b3b-436f-bf43-54c2a6a511b0-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b334d5ab-4b3b-436f-bf43-54c2a6a511b0\") " pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.204097 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b334d5ab-4b3b-436f-bf43-54c2a6a511b0-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b334d5ab-4b3b-436f-bf43-54c2a6a511b0\") " pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.204965 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b334d5ab-4b3b-436f-bf43-54c2a6a511b0-combined-ca-bundle\") pod 
\"nova-cell1-conductor-0\" (UID: \"b334d5ab-4b3b-436f-bf43-54c2a6a511b0\") " pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.216026 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxvwr\" (UniqueName: \"kubernetes.io/projected/b334d5ab-4b3b-436f-bf43-54c2a6a511b0-kube-api-access-zxvwr\") pod \"nova-cell1-conductor-0\" (UID: \"b334d5ab-4b3b-436f-bf43-54c2a6a511b0\") " pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.290571 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.290578 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.314939 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.795096 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 29 04:33:39 crc kubenswrapper[4631]: W1129 04:33:39.810249 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb334d5ab_4b3b_436f_bf43_54c2a6a511b0.slice/crio-ff70493b64dc89ed666bd28b74dbcc5ee0727936ba3041c3f64dc2247d4a092f WatchSource:0}: Error finding container ff70493b64dc89ed666bd28b74dbcc5ee0727936ba3041c3f64dc2247d4a092f: Status 404 returned error can't find the container with id ff70493b64dc89ed666bd28b74dbcc5ee0727936ba3041c3f64dc2247d4a092f Nov 29 04:33:39 crc kubenswrapper[4631]: I1129 04:33:39.887968 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b334d5ab-4b3b-436f-bf43-54c2a6a511b0","Type":"ContainerStarted","Data":"ff70493b64dc89ed666bd28b74dbcc5ee0727936ba3041c3f64dc2247d4a092f"} Nov 29 04:33:43 crc kubenswrapper[4631]: I1129 04:33:43.944010 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b334d5ab-4b3b-436f-bf43-54c2a6a511b0","Type":"ContainerStarted","Data":"f37fd45a57ff78bca10691283e1a35eefd88196dfabed999da2479197fb65ccb"} Nov 29 04:33:43 crc kubenswrapper[4631]: I1129 04:33:43.944318 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:43 crc kubenswrapper[4631]: I1129 04:33:43.978389 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=5.978371426 podStartE2EDuration="5.978371426s" podCreationTimestamp="2025-11-29 04:33:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:43.971836316 +0000 UTC m=+1351.036339830" watchObservedRunningTime="2025-11-29 04:33:43.978371426 +0000 UTC m=+1351.042874950" Nov 29 04:33:44 crc kubenswrapper[4631]: I1129 04:33:44.954756 4631 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerStarted","Data":"eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e"} Nov 29 04:33:44 crc kubenswrapper[4631]: I1129 04:33:44.955440 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 04:33:44 crc kubenswrapper[4631]: I1129 04:33:44.994081 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.889047902 podStartE2EDuration="10.994061935s" podCreationTimestamp="2025-11-29 04:33:34 +0000 UTC" firstStartedPulling="2025-11-29 04:33:34.979351715 +0000 UTC m=+1342.043855229" lastFinishedPulling="2025-11-29 04:33:44.084365748 +0000 UTC m=+1351.148869262" observedRunningTime="2025-11-29 04:33:44.98118688 +0000 UTC m=+1352.045690394" watchObservedRunningTime="2025-11-29 04:33:44.994061935 +0000 UTC m=+1352.058565459" Nov 29 04:33:46 crc kubenswrapper[4631]: I1129 04:33:46.079924 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 29 04:33:46 crc kubenswrapper[4631]: I1129 04:33:46.082987 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 29 04:33:46 crc kubenswrapper[4631]: I1129 04:33:46.087240 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 29 04:33:46 crc kubenswrapper[4631]: I1129 04:33:46.985408 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 29 04:33:48 crc kubenswrapper[4631]: I1129 04:33:48.214647 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 29 04:33:48 crc kubenswrapper[4631]: I1129 04:33:48.215665 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 29 04:33:48 crc kubenswrapper[4631]: I1129 04:33:48.223064 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 29 04:33:48 crc kubenswrapper[4631]: I1129 04:33:48.225559 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.011221 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.028203 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.345411 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.474218 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-mxxl2"] Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.478719 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.528793 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-mxxl2"] Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.573574 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.573707 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-config\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.573757 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.573896 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.573940 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44lfm\" (UniqueName: \"kubernetes.io/projected/698c63b4-a1ab-4de8-aab5-b3a676703d4b-kube-api-access-44lfm\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.573989 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.676003 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-config\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.676294 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.676967 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-config\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.677002 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.677088 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.677136 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44lfm\" (UniqueName: \"kubernetes.io/projected/698c63b4-a1ab-4de8-aab5-b3a676703d4b-kube-api-access-44lfm\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.677655 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.677716 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.678237 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.678428 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.678946 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.703239 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44lfm\" (UniqueName: 
\"kubernetes.io/projected/698c63b4-a1ab-4de8-aab5-b3a676703d4b-kube-api-access-44lfm\") pod \"dnsmasq-dns-89c5cd4d5-mxxl2\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:49 crc kubenswrapper[4631]: I1129 04:33:49.795760 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.313389 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-mxxl2"] Nov 29 04:33:50 crc kubenswrapper[4631]: W1129 04:33:50.318728 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod698c63b4_a1ab_4de8_aab5_b3a676703d4b.slice/crio-55cdbc36d5b33a9c576321ea985a5213da182b2820ae7836bbad8bca5954b7ee WatchSource:0}: Error finding container 55cdbc36d5b33a9c576321ea985a5213da182b2820ae7836bbad8bca5954b7ee: Status 404 returned error can't find the container with id 55cdbc36d5b33a9c576321ea985a5213da182b2820ae7836bbad8bca5954b7ee Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.542556 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-kbfj8"] Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.544085 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.548536 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.548721 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.562783 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-kbfj8"] Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.698302 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-scripts\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.698368 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6z49\" (UniqueName: \"kubernetes.io/projected/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-kube-api-access-f6z49\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.698418 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-config-data\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.698444 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " 
pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.800078 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-scripts\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.800133 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6z49\" (UniqueName: \"kubernetes.io/projected/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-kube-api-access-f6z49\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.800180 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-config-data\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.800205 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.805269 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-config-data\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.808066 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.808927 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-scripts\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.818944 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6z49\" (UniqueName: \"kubernetes.io/projected/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-kube-api-access-f6z49\") pod \"nova-cell1-cell-mapping-kbfj8\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:50 crc kubenswrapper[4631]: I1129 04:33:50.859832 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:51 crc kubenswrapper[4631]: I1129 04:33:51.045814 4631 generic.go:334] "Generic (PLEG): container finished" podID="698c63b4-a1ab-4de8-aab5-b3a676703d4b" containerID="ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a" exitCode=0 Nov 29 04:33:51 crc kubenswrapper[4631]: I1129 04:33:51.046080 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" event={"ID":"698c63b4-a1ab-4de8-aab5-b3a676703d4b","Type":"ContainerDied","Data":"ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a"} Nov 29 04:33:51 crc kubenswrapper[4631]: I1129 04:33:51.046105 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" event={"ID":"698c63b4-a1ab-4de8-aab5-b3a676703d4b","Type":"ContainerStarted","Data":"55cdbc36d5b33a9c576321ea985a5213da182b2820ae7836bbad8bca5954b7ee"} Nov 29 04:33:51 crc kubenswrapper[4631]: I1129 04:33:51.332167 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-kbfj8"] Nov 29 04:33:51 crc kubenswrapper[4631]: W1129 04:33:51.341611 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7877ec7_2f8b_4482_9a82_7a37e8c0ad47.slice/crio-c1064fda607d6aa7bce4871fac76fdaff59570d214db74fdd9eac1c558e8ae7b WatchSource:0}: Error finding container c1064fda607d6aa7bce4871fac76fdaff59570d214db74fdd9eac1c558e8ae7b: Status 404 returned error can't find the container with id c1064fda607d6aa7bce4871fac76fdaff59570d214db74fdd9eac1c558e8ae7b Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.055577 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-kbfj8" event={"ID":"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47","Type":"ContainerStarted","Data":"5cf5559bf9fe3fea6ccad5f20cfebddd0d91377639ad29c9f62483d1074a032b"} Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.055851 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-kbfj8" event={"ID":"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47","Type":"ContainerStarted","Data":"c1064fda607d6aa7bce4871fac76fdaff59570d214db74fdd9eac1c558e8ae7b"} Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.058037 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" event={"ID":"698c63b4-a1ab-4de8-aab5-b3a676703d4b","Type":"ContainerStarted","Data":"9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234"} Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.058208 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.075875 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-kbfj8" podStartSLOduration=2.07585278 podStartE2EDuration="2.07585278s" podCreationTimestamp="2025-11-29 04:33:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:52.068097111 +0000 UTC m=+1359.132600625" watchObservedRunningTime="2025-11-29 04:33:52.07585278 +0000 UTC m=+1359.140356294" Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.086656 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" 
podStartSLOduration=3.086640984 podStartE2EDuration="3.086640984s" podCreationTimestamp="2025-11-29 04:33:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:52.084515222 +0000 UTC m=+1359.149018736" watchObservedRunningTime="2025-11-29 04:33:52.086640984 +0000 UTC m=+1359.151144498" Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.229974 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.230188 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-log" containerID="cri-o://45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea" gracePeriod=30 Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.230268 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-api" containerID="cri-o://2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e" gracePeriod=30 Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.543845 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.544111 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="ceilometer-central-agent" containerID="cri-o://3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea" gracePeriod=30 Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.544265 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="proxy-httpd" containerID="cri-o://eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e" gracePeriod=30 Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.544305 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="sg-core" containerID="cri-o://729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664" gracePeriod=30 Nov 29 04:33:52 crc kubenswrapper[4631]: I1129 04:33:52.544348 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="ceilometer-notification-agent" containerID="cri-o://f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082" gracePeriod=30 Nov 29 04:33:53 crc kubenswrapper[4631]: I1129 04:33:53.070677 4631 generic.go:334] "Generic (PLEG): container finished" podID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerID="eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e" exitCode=0 Nov 29 04:33:53 crc kubenswrapper[4631]: I1129 04:33:53.070992 4631 generic.go:334] "Generic (PLEG): container finished" podID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerID="729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664" exitCode=2 Nov 29 04:33:53 crc kubenswrapper[4631]: I1129 04:33:53.071001 4631 generic.go:334] "Generic (PLEG): container finished" podID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerID="3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea" exitCode=0 Nov 29 04:33:53 crc 
kubenswrapper[4631]: I1129 04:33:53.070775 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerDied","Data":"eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e"} Nov 29 04:33:53 crc kubenswrapper[4631]: I1129 04:33:53.071079 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerDied","Data":"729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664"} Nov 29 04:33:53 crc kubenswrapper[4631]: I1129 04:33:53.071092 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerDied","Data":"3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea"} Nov 29 04:33:53 crc kubenswrapper[4631]: I1129 04:33:53.073321 4631 generic.go:334] "Generic (PLEG): container finished" podID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerID="45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea" exitCode=143 Nov 29 04:33:53 crc kubenswrapper[4631]: I1129 04:33:53.073463 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e8d69e47-05ab-43fb-b75f-ad96caf08df0","Type":"ContainerDied","Data":"45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea"} Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.824648 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.981243 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-run-httpd\") pod \"f8b6c6f8-9441-4508-a617-d22255a1e075\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.981345 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-combined-ca-bundle\") pod \"f8b6c6f8-9441-4508-a617-d22255a1e075\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.981436 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-log-httpd\") pod \"f8b6c6f8-9441-4508-a617-d22255a1e075\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.981464 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-ceilometer-tls-certs\") pod \"f8b6c6f8-9441-4508-a617-d22255a1e075\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.981489 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-sg-core-conf-yaml\") pod \"f8b6c6f8-9441-4508-a617-d22255a1e075\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.981542 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-config-data\") pod \"f8b6c6f8-9441-4508-a617-d22255a1e075\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.981560 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2zts\" (UniqueName: \"kubernetes.io/projected/f8b6c6f8-9441-4508-a617-d22255a1e075-kube-api-access-m2zts\") pod \"f8b6c6f8-9441-4508-a617-d22255a1e075\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.981599 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-scripts\") pod \"f8b6c6f8-9441-4508-a617-d22255a1e075\" (UID: \"f8b6c6f8-9441-4508-a617-d22255a1e075\") " Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.982775 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f8b6c6f8-9441-4508-a617-d22255a1e075" (UID: "f8b6c6f8-9441-4508-a617-d22255a1e075"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.982863 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f8b6c6f8-9441-4508-a617-d22255a1e075" (UID: "f8b6c6f8-9441-4508-a617-d22255a1e075"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:33:54 crc kubenswrapper[4631]: I1129 04:33:54.988287 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-scripts" (OuterVolumeSpecName: "scripts") pod "f8b6c6f8-9441-4508-a617-d22255a1e075" (UID: "f8b6c6f8-9441-4508-a617-d22255a1e075"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.005005 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8b6c6f8-9441-4508-a617-d22255a1e075-kube-api-access-m2zts" (OuterVolumeSpecName: "kube-api-access-m2zts") pod "f8b6c6f8-9441-4508-a617-d22255a1e075" (UID: "f8b6c6f8-9441-4508-a617-d22255a1e075"). InnerVolumeSpecName "kube-api-access-m2zts". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.008689 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f8b6c6f8-9441-4508-a617-d22255a1e075" (UID: "f8b6c6f8-9441-4508-a617-d22255a1e075"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.060148 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f8b6c6f8-9441-4508-a617-d22255a1e075" (UID: "f8b6c6f8-9441-4508-a617-d22255a1e075"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.071011 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f8b6c6f8-9441-4508-a617-d22255a1e075" (UID: "f8b6c6f8-9441-4508-a617-d22255a1e075"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.083580 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2zts\" (UniqueName: \"kubernetes.io/projected/f8b6c6f8-9441-4508-a617-d22255a1e075-kube-api-access-m2zts\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.083854 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.083923 4631 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.083978 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.084049 4631 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8b6c6f8-9441-4508-a617-d22255a1e075-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.084101 4631 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.084154 4631 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.090619 4631 generic.go:334] "Generic (PLEG): container finished" podID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerID="f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082" exitCode=0 Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.090753 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerDied","Data":"f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082"} Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.090832 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8b6c6f8-9441-4508-a617-d22255a1e075","Type":"ContainerDied","Data":"e21d78cf4b53c5693d967f95887407ac2d7bac9c039669fb0c9f179b756318bf"} Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.090913 4631 scope.go:117] "RemoveContainer" containerID="eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.091080 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.106317 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-config-data" (OuterVolumeSpecName: "config-data") pod "f8b6c6f8-9441-4508-a617-d22255a1e075" (UID: "f8b6c6f8-9441-4508-a617-d22255a1e075"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.139345 4631 scope.go:117] "RemoveContainer" containerID="729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.156670 4631 scope.go:117] "RemoveContainer" containerID="f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.179977 4631 scope.go:117] "RemoveContainer" containerID="3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.186361 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8b6c6f8-9441-4508-a617-d22255a1e075-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.200712 4631 scope.go:117] "RemoveContainer" containerID="eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e" Nov 29 04:33:55 crc kubenswrapper[4631]: E1129 04:33:55.201161 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e\": container with ID starting with eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e not found: ID does not exist" containerID="eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.201190 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e"} err="failed to get container status \"eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e\": rpc error: code = NotFound desc = could not find container \"eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e\": container with ID starting with eb6c01e53d909d76dbee43b475be92dcc95ff11c846fb22ea6e0ce21572add1e not found: ID does not exist" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.201211 4631 scope.go:117] "RemoveContainer" containerID="729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664" Nov 29 04:33:55 crc kubenswrapper[4631]: E1129 04:33:55.201667 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664\": container with ID starting with 729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664 not found: ID does not exist" containerID="729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.201706 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664"} err="failed to get container status \"729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664\": rpc error: code = NotFound desc = could not find container 
\"729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664\": container with ID starting with 729df4be504b3579fcbd3998dacc845fd860e3bb906f5a6a6f1ac8a934b83664 not found: ID does not exist" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.201738 4631 scope.go:117] "RemoveContainer" containerID="f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082" Nov 29 04:33:55 crc kubenswrapper[4631]: E1129 04:33:55.202066 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082\": container with ID starting with f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082 not found: ID does not exist" containerID="f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.202088 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082"} err="failed to get container status \"f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082\": rpc error: code = NotFound desc = could not find container \"f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082\": container with ID starting with f2e1e73d71f683555a693a65bbaa006cb6cc00e2cb9ba67438b675ac4e1dd082 not found: ID does not exist" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.202101 4631 scope.go:117] "RemoveContainer" containerID="3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea" Nov 29 04:33:55 crc kubenswrapper[4631]: E1129 04:33:55.202363 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea\": container with ID starting with 3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea not found: ID does not exist" containerID="3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.202383 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea"} err="failed to get container status \"3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea\": rpc error: code = NotFound desc = could not find container \"3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea\": container with ID starting with 3aa906d38af0f510077ec471ab53d89e1f90a299793558591a76c8fd9562e6ea not found: ID does not exist" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.424138 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.444894 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.452542 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:55 crc kubenswrapper[4631]: E1129 04:33:55.452901 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="ceilometer-notification-agent" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.452916 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="ceilometer-notification-agent" Nov 29 04:33:55 
crc kubenswrapper[4631]: E1129 04:33:55.452926 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="proxy-httpd" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.452932 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="proxy-httpd" Nov 29 04:33:55 crc kubenswrapper[4631]: E1129 04:33:55.452941 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="sg-core" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.452947 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="sg-core" Nov 29 04:33:55 crc kubenswrapper[4631]: E1129 04:33:55.452975 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="ceilometer-central-agent" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.452981 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="ceilometer-central-agent" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.453144 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="sg-core" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.453155 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="ceilometer-central-agent" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.453171 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="proxy-httpd" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.453181 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" containerName="ceilometer-notification-agent" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.454793 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.459616 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.459918 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.460028 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.469546 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.593787 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.593862 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad1ef30-44b5-455b-86a8-136862164eba-log-httpd\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.593882 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-config-data\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.593954 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-scripts\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.594109 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.594156 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.594194 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad1ef30-44b5-455b-86a8-136862164eba-run-httpd\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.594250 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmtt4\" (UniqueName: 
\"kubernetes.io/projected/2ad1ef30-44b5-455b-86a8-136862164eba-kube-api-access-nmtt4\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.695673 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.695903 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad1ef30-44b5-455b-86a8-136862164eba-log-httpd\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.695923 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-config-data\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.695956 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-scripts\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.695980 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.695997 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.696032 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad1ef30-44b5-455b-86a8-136862164eba-run-httpd\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.696063 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmtt4\" (UniqueName: \"kubernetes.io/projected/2ad1ef30-44b5-455b-86a8-136862164eba-kube-api-access-nmtt4\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.700896 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad1ef30-44b5-455b-86a8-136862164eba-log-httpd\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.701014 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/2ad1ef30-44b5-455b-86a8-136862164eba-run-httpd\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.702145 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.703362 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.705691 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-config-data\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.727500 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmtt4\" (UniqueName: \"kubernetes.io/projected/2ad1ef30-44b5-455b-86a8-136862164eba-kube-api-access-nmtt4\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.728687 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-scripts\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.736404 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ad1ef30-44b5-455b-86a8-136862164eba-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2ad1ef30-44b5-455b-86a8-136862164eba\") " pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.828951 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 29 04:33:55 crc kubenswrapper[4631]: I1129 04:33:55.966350 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.094898 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.104433 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-combined-ca-bundle\") pod \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.104504 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvkbs\" (UniqueName: \"kubernetes.io/projected/e8d69e47-05ab-43fb-b75f-ad96caf08df0-kube-api-access-hvkbs\") pod \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.104584 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-config-data\") pod \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.104652 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8d69e47-05ab-43fb-b75f-ad96caf08df0-logs\") pod \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\" (UID: \"e8d69e47-05ab-43fb-b75f-ad96caf08df0\") " Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.105771 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8d69e47-05ab-43fb-b75f-ad96caf08df0-logs" (OuterVolumeSpecName: "logs") pod "e8d69e47-05ab-43fb-b75f-ad96caf08df0" (UID: "e8d69e47-05ab-43fb-b75f-ad96caf08df0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.115074 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8d69e47-05ab-43fb-b75f-ad96caf08df0-kube-api-access-hvkbs" (OuterVolumeSpecName: "kube-api-access-hvkbs") pod "e8d69e47-05ab-43fb-b75f-ad96caf08df0" (UID: "e8d69e47-05ab-43fb-b75f-ad96caf08df0"). InnerVolumeSpecName "kube-api-access-hvkbs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.132397 4631 generic.go:334] "Generic (PLEG): container finished" podID="ba30fb10-ed90-4538-ae0d-d041d9e74dcd" containerID="ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c" exitCode=137 Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.132491 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ba30fb10-ed90-4538-ae0d-d041d9e74dcd","Type":"ContainerDied","Data":"ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c"} Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.132520 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ba30fb10-ed90-4538-ae0d-d041d9e74dcd","Type":"ContainerDied","Data":"935cfe9b08a55807adfea35e9433742e5980b98597eeca58de31067f10dda1e4"} Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.132536 4631 scope.go:117] "RemoveContainer" containerID="ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.132758 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.134695 4631 generic.go:334] "Generic (PLEG): container finished" podID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerID="2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e" exitCode=0 Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.134721 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e8d69e47-05ab-43fb-b75f-ad96caf08df0","Type":"ContainerDied","Data":"2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e"} Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.134736 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e8d69e47-05ab-43fb-b75f-ad96caf08df0","Type":"ContainerDied","Data":"332c3dc61578f79392ed8997444ade5b33ce2ee8ac1b01dcbe3db368c3fce405"} Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.134799 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.139688 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8d69e47-05ab-43fb-b75f-ad96caf08df0" (UID: "e8d69e47-05ab-43fb-b75f-ad96caf08df0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.155910 4631 scope.go:117] "RemoveContainer" containerID="ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c" Nov 29 04:33:56 crc kubenswrapper[4631]: E1129 04:33:56.156324 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c\": container with ID starting with ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c not found: ID does not exist" containerID="ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.156379 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c"} err="failed to get container status \"ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c\": rpc error: code = NotFound desc = could not find container \"ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c\": container with ID starting with ef706679f14f3d65f09a531b0860b11fa5f7fee0a7abe350124bed86f7aa043c not found: ID does not exist" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.156402 4631 scope.go:117] "RemoveContainer" containerID="2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.156736 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-config-data" (OuterVolumeSpecName: "config-data") pod "e8d69e47-05ab-43fb-b75f-ad96caf08df0" (UID: "e8d69e47-05ab-43fb-b75f-ad96caf08df0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.182819 4631 scope.go:117] "RemoveContainer" containerID="45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.205873 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-combined-ca-bundle\") pod \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.205937 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m46qq\" (UniqueName: \"kubernetes.io/projected/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-kube-api-access-m46qq\") pod \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.205957 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-config-data\") pod \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\" (UID: \"ba30fb10-ed90-4538-ae0d-d041d9e74dcd\") " Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.206409 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.206426 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvkbs\" (UniqueName: \"kubernetes.io/projected/e8d69e47-05ab-43fb-b75f-ad96caf08df0-kube-api-access-hvkbs\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.206436 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d69e47-05ab-43fb-b75f-ad96caf08df0-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.206445 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8d69e47-05ab-43fb-b75f-ad96caf08df0-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.225154 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-kube-api-access-m46qq" (OuterVolumeSpecName: "kube-api-access-m46qq") pod "ba30fb10-ed90-4538-ae0d-d041d9e74dcd" (UID: "ba30fb10-ed90-4538-ae0d-d041d9e74dcd"). InnerVolumeSpecName "kube-api-access-m46qq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.225422 4631 scope.go:117] "RemoveContainer" containerID="2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e" Nov 29 04:33:56 crc kubenswrapper[4631]: E1129 04:33:56.231862 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e\": container with ID starting with 2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e not found: ID does not exist" containerID="2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.231906 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e"} err="failed to get container status \"2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e\": rpc error: code = NotFound desc = could not find container \"2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e\": container with ID starting with 2cc894c3981eddc923c88ab6b6660d4eb209cb26cebce8eed707168f48f1b60e not found: ID does not exist" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.231933 4631 scope.go:117] "RemoveContainer" containerID="45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea" Nov 29 04:33:56 crc kubenswrapper[4631]: E1129 04:33:56.232214 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea\": container with ID starting with 45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea not found: ID does not exist" containerID="45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.232238 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea"} err="failed to get container status \"45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea\": rpc error: code = NotFound desc = could not find container \"45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea\": container with ID starting with 45207f5f5c636a0d86c120ade12d3af8c6568e14a2387b7dd8e1953e8646d8ea not found: ID does not exist" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.252016 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-config-data" (OuterVolumeSpecName: "config-data") pod "ba30fb10-ed90-4538-ae0d-d041d9e74dcd" (UID: "ba30fb10-ed90-4538-ae0d-d041d9e74dcd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.252456 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba30fb10-ed90-4538-ae0d-d041d9e74dcd" (UID: "ba30fb10-ed90-4538-ae0d-d041d9e74dcd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.308716 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.308742 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m46qq\" (UniqueName: \"kubernetes.io/projected/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-kube-api-access-m46qq\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.308751 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30fb10-ed90-4538-ae0d-d041d9e74dcd-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.429733 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.481704 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.489303 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.513490 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.529304 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.541719 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:56 crc kubenswrapper[4631]: E1129 04:33:56.542163 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-api" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.542179 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-api" Nov 29 04:33:56 crc kubenswrapper[4631]: E1129 04:33:56.542214 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-log" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.542221 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-log" Nov 29 04:33:56 crc kubenswrapper[4631]: E1129 04:33:56.542239 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba30fb10-ed90-4538-ae0d-d041d9e74dcd" containerName="nova-scheduler-scheduler" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.542245 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba30fb10-ed90-4538-ae0d-d041d9e74dcd" containerName="nova-scheduler-scheduler" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.542445 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-api" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.542467 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" containerName="nova-api-log" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.542479 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba30fb10-ed90-4538-ae0d-d041d9e74dcd" 
containerName="nova-scheduler-scheduler" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.543493 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.547855 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.548057 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.548159 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.550480 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.551950 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.554279 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.556026 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.614723 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-public-tls-certs\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.614946 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbgnp\" (UniqueName: \"kubernetes.io/projected/ff98f2fb-56df-43db-92af-f5e6bf57f302-kube-api-access-wbgnp\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.615070 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.615182 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-config-data\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.615272 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff98f2fb-56df-43db-92af-f5e6bf57f302-logs\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.615387 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " 
pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.619124 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.717555 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.717808 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nzhn\" (UniqueName: \"kubernetes.io/projected/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-kube-api-access-4nzhn\") pod \"nova-scheduler-0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.717920 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-public-tls-certs\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.718001 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-config-data\") pod \"nova-scheduler-0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.718082 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.718181 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbgnp\" (UniqueName: \"kubernetes.io/projected/ff98f2fb-56df-43db-92af-f5e6bf57f302-kube-api-access-wbgnp\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.718248 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.718407 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-config-data\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.718483 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff98f2fb-56df-43db-92af-f5e6bf57f302-logs\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.718883 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff98f2fb-56df-43db-92af-f5e6bf57f302-logs\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.723124 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-config-data\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.724557 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.726737 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-public-tls-certs\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.729755 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.744624 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbgnp\" (UniqueName: \"kubernetes.io/projected/ff98f2fb-56df-43db-92af-f5e6bf57f302-kube-api-access-wbgnp\") pod \"nova-api-0\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.819606 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nzhn\" (UniqueName: \"kubernetes.io/projected/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-kube-api-access-4nzhn\") pod \"nova-scheduler-0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.819667 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-config-data\") pod \"nova-scheduler-0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.819689 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.822668 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.823839 4631 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-config-data\") pod \"nova-scheduler-0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.838175 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nzhn\" (UniqueName: \"kubernetes.io/projected/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-kube-api-access-4nzhn\") pod \"nova-scheduler-0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " pod="openstack/nova-scheduler-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.894276 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:33:56 crc kubenswrapper[4631]: I1129 04:33:56.919568 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 04:33:57 crc kubenswrapper[4631]: I1129 04:33:57.178035 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad1ef30-44b5-455b-86a8-136862164eba","Type":"ContainerStarted","Data":"f4ec5ee42e9730c67e7c8f199e94da288831983bd6a5a74f467bea06ce8d7813"} Nov 29 04:33:57 crc kubenswrapper[4631]: I1129 04:33:57.185738 4631 generic.go:334] "Generic (PLEG): container finished" podID="a7877ec7-2f8b-4482-9a82-7a37e8c0ad47" containerID="5cf5559bf9fe3fea6ccad5f20cfebddd0d91377639ad29c9f62483d1074a032b" exitCode=0 Nov 29 04:33:57 crc kubenswrapper[4631]: I1129 04:33:57.185781 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-kbfj8" event={"ID":"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47","Type":"ContainerDied","Data":"5cf5559bf9fe3fea6ccad5f20cfebddd0d91377639ad29c9f62483d1074a032b"} Nov 29 04:33:57 crc kubenswrapper[4631]: I1129 04:33:57.238931 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba30fb10-ed90-4538-ae0d-d041d9e74dcd" path="/var/lib/kubelet/pods/ba30fb10-ed90-4538-ae0d-d041d9e74dcd/volumes" Nov 29 04:33:57 crc kubenswrapper[4631]: I1129 04:33:57.239690 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8d69e47-05ab-43fb-b75f-ad96caf08df0" path="/var/lib/kubelet/pods/e8d69e47-05ab-43fb-b75f-ad96caf08df0/volumes" Nov 29 04:33:57 crc kubenswrapper[4631]: I1129 04:33:57.240248 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8b6c6f8-9441-4508-a617-d22255a1e075" path="/var/lib/kubelet/pods/f8b6c6f8-9441-4508-a617-d22255a1e075/volumes" Nov 29 04:33:57 crc kubenswrapper[4631]: I1129 04:33:57.241526 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:57 crc kubenswrapper[4631]: I1129 04:33:57.485050 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.210233 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff98f2fb-56df-43db-92af-f5e6bf57f302","Type":"ContainerStarted","Data":"28b4a20f8269bbdc02b64646b3edb32b48a8f9cb14abd7e69c69a45a49caa7fc"} Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.212161 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff98f2fb-56df-43db-92af-f5e6bf57f302","Type":"ContainerStarted","Data":"21fcac4a83c8a8d62025fab690833751b78f5fd410f8f7c9c84680ef92b44c90"} Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.212328 4631 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/nova-api-0" event={"ID":"ff98f2fb-56df-43db-92af-f5e6bf57f302","Type":"ContainerStarted","Data":"4642646e20578c98af295bfe9b5b21cdbc64baba5833ba12bfc51e5e49999525"} Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.214377 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0","Type":"ContainerStarted","Data":"549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46"} Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.214572 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0","Type":"ContainerStarted","Data":"a052958a8482f8464db75630693d98338ed1841c164cb2159cf6bf640757a3a4"} Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.216106 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad1ef30-44b5-455b-86a8-136862164eba","Type":"ContainerStarted","Data":"0c6f9c871f791756c6b5f2716cd28035220c6da137be0cf3213f26d2f23a3971"} Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.599300 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.754859 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-config-data\") pod \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.754909 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-combined-ca-bundle\") pod \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.754988 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-scripts\") pod \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.755028 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6z49\" (UniqueName: \"kubernetes.io/projected/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-kube-api-access-f6z49\") pod \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\" (UID: \"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47\") " Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.759495 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-kube-api-access-f6z49" (OuterVolumeSpecName: "kube-api-access-f6z49") pod "a7877ec7-2f8b-4482-9a82-7a37e8c0ad47" (UID: "a7877ec7-2f8b-4482-9a82-7a37e8c0ad47"). InnerVolumeSpecName "kube-api-access-f6z49". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.760910 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-scripts" (OuterVolumeSpecName: "scripts") pod "a7877ec7-2f8b-4482-9a82-7a37e8c0ad47" (UID: "a7877ec7-2f8b-4482-9a82-7a37e8c0ad47"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.782129 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7877ec7-2f8b-4482-9a82-7a37e8c0ad47" (UID: "a7877ec7-2f8b-4482-9a82-7a37e8c0ad47"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.822728 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-config-data" (OuterVolumeSpecName: "config-data") pod "a7877ec7-2f8b-4482-9a82-7a37e8c0ad47" (UID: "a7877ec7-2f8b-4482-9a82-7a37e8c0ad47"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.857377 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.857409 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.857435 4631 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-scripts\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:58 crc kubenswrapper[4631]: I1129 04:33:58.857444 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6z49\" (UniqueName: \"kubernetes.io/projected/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47-kube-api-access-f6z49\") on node \"crc\" DevicePath \"\"" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.233184 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-kbfj8" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.233269 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-kbfj8" event={"ID":"a7877ec7-2f8b-4482-9a82-7a37e8c0ad47","Type":"ContainerDied","Data":"c1064fda607d6aa7bce4871fac76fdaff59570d214db74fdd9eac1c558e8ae7b"} Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.233799 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1064fda607d6aa7bce4871fac76fdaff59570d214db74fdd9eac1c558e8ae7b" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.289000 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.288974493 podStartE2EDuration="3.288974493s" podCreationTimestamp="2025-11-29 04:33:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:59.259036987 +0000 UTC m=+1366.323540501" watchObservedRunningTime="2025-11-29 04:33:59.288974493 +0000 UTC m=+1366.353478017" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.295705 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.295692688 podStartE2EDuration="3.295692688s" podCreationTimestamp="2025-11-29 04:33:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:33:59.279241463 +0000 UTC m=+1366.343744997" watchObservedRunningTime="2025-11-29 04:33:59.295692688 +0000 UTC m=+1366.360196202" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.413145 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.417997 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.470970 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.471225 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-log" containerID="cri-o://f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59" gracePeriod=30 Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.471619 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-metadata" containerID="cri-o://202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602" gracePeriod=30 Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.798497 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.806950 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7vh9h"] Nov 29 04:33:59 crc kubenswrapper[4631]: E1129 04:33:59.807525 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7877ec7-2f8b-4482-9a82-7a37e8c0ad47" containerName="nova-manage" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.807640 4631 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a7877ec7-2f8b-4482-9a82-7a37e8c0ad47" containerName="nova-manage" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.807950 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7877ec7-2f8b-4482-9a82-7a37e8c0ad47" containerName="nova-manage" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.809652 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.820461 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7vh9h"] Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.880719 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-catalog-content\") pod \"redhat-operators-7vh9h\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.880751 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x42h\" (UniqueName: \"kubernetes.io/projected/40e879cd-6972-40b3-9c4c-63c5404487b2-kube-api-access-4x42h\") pod \"redhat-operators-7vh9h\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.880816 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-utilities\") pod \"redhat-operators-7vh9h\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.884935 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2mddc"] Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.885165 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" podUID="005dc9bc-563c-460c-9c82-2203b2512a69" containerName="dnsmasq-dns" containerID="cri-o://a0b1bd5c274c1934b738fca3d643c1f6bd6d0e8b4c57f7be1c79e36ce4f822cf" gracePeriod=10 Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.986190 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-catalog-content\") pod \"redhat-operators-7vh9h\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.986226 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x42h\" (UniqueName: \"kubernetes.io/projected/40e879cd-6972-40b3-9c4c-63c5404487b2-kube-api-access-4x42h\") pod \"redhat-operators-7vh9h\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.986667 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-catalog-content\") pod \"redhat-operators-7vh9h\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " 
pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.986270 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-utilities\") pod \"redhat-operators-7vh9h\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:33:59 crc kubenswrapper[4631]: I1129 04:33:59.986940 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-utilities\") pod \"redhat-operators-7vh9h\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.003761 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x42h\" (UniqueName: \"kubernetes.io/projected/40e879cd-6972-40b3-9c4c-63c5404487b2-kube-api-access-4x42h\") pod \"redhat-operators-7vh9h\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.128713 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.253947 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad1ef30-44b5-455b-86a8-136862164eba","Type":"ContainerStarted","Data":"68111763715b0ebec62a3b520478669069c8ee74bf30bde31341abecef1e7a70"} Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.254258 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad1ef30-44b5-455b-86a8-136862164eba","Type":"ContainerStarted","Data":"d03a8b3653906d3dfc854a82a576a7509aaf12a7af09035e0ca7194319eabd7f"} Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.263119 4631 generic.go:334] "Generic (PLEG): container finished" podID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerID="f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59" exitCode=143 Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.263175 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae1fc503-ebc1-4261-b4b8-ee167b101e08","Type":"ContainerDied","Data":"f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59"} Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.265001 4631 generic.go:334] "Generic (PLEG): container finished" podID="005dc9bc-563c-460c-9c82-2203b2512a69" containerID="a0b1bd5c274c1934b738fca3d643c1f6bd6d0e8b4c57f7be1c79e36ce4f822cf" exitCode=0 Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.265146 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0" containerName="nova-scheduler-scheduler" containerID="cri-o://549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46" gracePeriod=30 Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.266066 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" event={"ID":"005dc9bc-563c-460c-9c82-2203b2512a69","Type":"ContainerDied","Data":"a0b1bd5c274c1934b738fca3d643c1f6bd6d0e8b4c57f7be1c79e36ce4f822cf"} Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.266230 4631 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerName="nova-api-log" containerID="cri-o://21fcac4a83c8a8d62025fab690833751b78f5fd410f8f7c9c84680ef92b44c90" gracePeriod=30 Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.266362 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerName="nova-api-api" containerID="cri-o://28b4a20f8269bbdc02b64646b3edb32b48a8f9cb14abd7e69c69a45a49caa7fc" gracePeriod=30 Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.484904 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.600187 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-svc\") pod \"005dc9bc-563c-460c-9c82-2203b2512a69\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.600517 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-config\") pod \"005dc9bc-563c-460c-9c82-2203b2512a69\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.600575 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-swift-storage-0\") pod \"005dc9bc-563c-460c-9c82-2203b2512a69\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.600763 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-sb\") pod \"005dc9bc-563c-460c-9c82-2203b2512a69\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.600786 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klskj\" (UniqueName: \"kubernetes.io/projected/005dc9bc-563c-460c-9c82-2203b2512a69-kube-api-access-klskj\") pod \"005dc9bc-563c-460c-9c82-2203b2512a69\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.600836 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-nb\") pod \"005dc9bc-563c-460c-9c82-2203b2512a69\" (UID: \"005dc9bc-563c-460c-9c82-2203b2512a69\") " Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.654483 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/005dc9bc-563c-460c-9c82-2203b2512a69-kube-api-access-klskj" (OuterVolumeSpecName: "kube-api-access-klskj") pod "005dc9bc-563c-460c-9c82-2203b2512a69" (UID: "005dc9bc-563c-460c-9c82-2203b2512a69"). InnerVolumeSpecName "kube-api-access-klskj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.704916 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klskj\" (UniqueName: \"kubernetes.io/projected/005dc9bc-563c-460c-9c82-2203b2512a69-kube-api-access-klskj\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.770904 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "005dc9bc-563c-460c-9c82-2203b2512a69" (UID: "005dc9bc-563c-460c-9c82-2203b2512a69"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.770900 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "005dc9bc-563c-460c-9c82-2203b2512a69" (UID: "005dc9bc-563c-460c-9c82-2203b2512a69"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.809423 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.809451 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.810195 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-config" (OuterVolumeSpecName: "config") pod "005dc9bc-563c-460c-9c82-2203b2512a69" (UID: "005dc9bc-563c-460c-9c82-2203b2512a69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.818383 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "005dc9bc-563c-460c-9c82-2203b2512a69" (UID: "005dc9bc-563c-460c-9c82-2203b2512a69"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.818927 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "005dc9bc-563c-460c-9c82-2203b2512a69" (UID: "005dc9bc-563c-460c-9c82-2203b2512a69"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.822233 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7vh9h"] Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.910906 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.911136 4631 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:00 crc kubenswrapper[4631]: I1129 04:34:00.911146 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/005dc9bc-563c-460c-9c82-2203b2512a69-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.274206 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" event={"ID":"005dc9bc-563c-460c-9c82-2203b2512a69","Type":"ContainerDied","Data":"8c3013c1d2108f14efb4bf6cef8ce75ebde60d6d9b11ad635112da4eba8e16ec"} Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.274233 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-2mddc" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.274275 4631 scope.go:117] "RemoveContainer" containerID="a0b1bd5c274c1934b738fca3d643c1f6bd6d0e8b4c57f7be1c79e36ce4f822cf" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.278137 4631 generic.go:334] "Generic (PLEG): container finished" podID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerID="28b4a20f8269bbdc02b64646b3edb32b48a8f9cb14abd7e69c69a45a49caa7fc" exitCode=0 Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.278168 4631 generic.go:334] "Generic (PLEG): container finished" podID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerID="21fcac4a83c8a8d62025fab690833751b78f5fd410f8f7c9c84680ef92b44c90" exitCode=143 Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.278221 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff98f2fb-56df-43db-92af-f5e6bf57f302","Type":"ContainerDied","Data":"28b4a20f8269bbdc02b64646b3edb32b48a8f9cb14abd7e69c69a45a49caa7fc"} Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.278247 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff98f2fb-56df-43db-92af-f5e6bf57f302","Type":"ContainerDied","Data":"21fcac4a83c8a8d62025fab690833751b78f5fd410f8f7c9c84680ef92b44c90"} Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.281963 4631 generic.go:334] "Generic (PLEG): container finished" podID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerID="98f356801da8af3ca590eb19610b34ddf7ed1e48357bd2a5e63877f7c2c774c5" exitCode=0 Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.281997 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7vh9h" event={"ID":"40e879cd-6972-40b3-9c4c-63c5404487b2","Type":"ContainerDied","Data":"98f356801da8af3ca590eb19610b34ddf7ed1e48357bd2a5e63877f7c2c774c5"} Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.282021 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7vh9h" 
event={"ID":"40e879cd-6972-40b3-9c4c-63c5404487b2","Type":"ContainerStarted","Data":"068b7443eeae91cc5e6702ba7b655371e9991883bf761ad10a5565e59906154e"} Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.301032 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2mddc"] Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.312642 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2mddc"] Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.318884 4631 scope.go:117] "RemoveContainer" containerID="5b36d023aa70f7b9e041c78fe1993f8f2e0a861a2ee66b7f6d37be7b10111450" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.796964 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.921375 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.934145 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-config-data\") pod \"ff98f2fb-56df-43db-92af-f5e6bf57f302\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.934206 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-public-tls-certs\") pod \"ff98f2fb-56df-43db-92af-f5e6bf57f302\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.935179 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbgnp\" (UniqueName: \"kubernetes.io/projected/ff98f2fb-56df-43db-92af-f5e6bf57f302-kube-api-access-wbgnp\") pod \"ff98f2fb-56df-43db-92af-f5e6bf57f302\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.935210 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-internal-tls-certs\") pod \"ff98f2fb-56df-43db-92af-f5e6bf57f302\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.935229 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff98f2fb-56df-43db-92af-f5e6bf57f302-logs\") pod \"ff98f2fb-56df-43db-92af-f5e6bf57f302\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.935256 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-combined-ca-bundle\") pod \"ff98f2fb-56df-43db-92af-f5e6bf57f302\" (UID: \"ff98f2fb-56df-43db-92af-f5e6bf57f302\") " Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.935896 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff98f2fb-56df-43db-92af-f5e6bf57f302-logs" (OuterVolumeSpecName: "logs") pod "ff98f2fb-56df-43db-92af-f5e6bf57f302" (UID: "ff98f2fb-56df-43db-92af-f5e6bf57f302"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.945576 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff98f2fb-56df-43db-92af-f5e6bf57f302-kube-api-access-wbgnp" (OuterVolumeSpecName: "kube-api-access-wbgnp") pod "ff98f2fb-56df-43db-92af-f5e6bf57f302" (UID: "ff98f2fb-56df-43db-92af-f5e6bf57f302"). InnerVolumeSpecName "kube-api-access-wbgnp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.965232 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-config-data" (OuterVolumeSpecName: "config-data") pod "ff98f2fb-56df-43db-92af-f5e6bf57f302" (UID: "ff98f2fb-56df-43db-92af-f5e6bf57f302"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.984191 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ff98f2fb-56df-43db-92af-f5e6bf57f302" (UID: "ff98f2fb-56df-43db-92af-f5e6bf57f302"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:34:01 crc kubenswrapper[4631]: I1129 04:34:01.991852 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff98f2fb-56df-43db-92af-f5e6bf57f302" (UID: "ff98f2fb-56df-43db-92af-f5e6bf57f302"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.005523 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ff98f2fb-56df-43db-92af-f5e6bf57f302" (UID: "ff98f2fb-56df-43db-92af-f5e6bf57f302"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.038044 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.038073 4631 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.038084 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wbgnp\" (UniqueName: \"kubernetes.io/projected/ff98f2fb-56df-43db-92af-f5e6bf57f302-kube-api-access-wbgnp\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.038093 4631 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.038102 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff98f2fb-56df-43db-92af-f5e6bf57f302-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.038110 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff98f2fb-56df-43db-92af-f5e6bf57f302-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.303844 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad1ef30-44b5-455b-86a8-136862164eba","Type":"ContainerStarted","Data":"3bede437996376859ff885ed8b2d3ed41ad623963ceace605bdd4f675c98f556"} Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.303994 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.307182 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ff98f2fb-56df-43db-92af-f5e6bf57f302","Type":"ContainerDied","Data":"4642646e20578c98af295bfe9b5b21cdbc64baba5833ba12bfc51e5e49999525"} Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.307232 4631 scope.go:117] "RemoveContainer" containerID="28b4a20f8269bbdc02b64646b3edb32b48a8f9cb14abd7e69c69a45a49caa7fc" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.307224 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.339102 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.5222359 podStartE2EDuration="7.339082551s" podCreationTimestamp="2025-11-29 04:33:55 +0000 UTC" firstStartedPulling="2025-11-29 04:33:56.468789034 +0000 UTC m=+1363.533292548" lastFinishedPulling="2025-11-29 04:34:01.285635685 +0000 UTC m=+1368.350139199" observedRunningTime="2025-11-29 04:34:02.326962163 +0000 UTC m=+1369.391465737" watchObservedRunningTime="2025-11-29 04:34:02.339082551 +0000 UTC m=+1369.403586065" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.385571 4631 scope.go:117] "RemoveContainer" containerID="21fcac4a83c8a8d62025fab690833751b78f5fd410f8f7c9c84680ef92b44c90" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.388949 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.399901 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.433946 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 29 04:34:02 crc kubenswrapper[4631]: E1129 04:34:02.434392 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerName="nova-api-log" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.434405 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerName="nova-api-log" Nov 29 04:34:02 crc kubenswrapper[4631]: E1129 04:34:02.434439 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerName="nova-api-api" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.434446 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerName="nova-api-api" Nov 29 04:34:02 crc kubenswrapper[4631]: E1129 04:34:02.434463 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="005dc9bc-563c-460c-9c82-2203b2512a69" containerName="init" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.434469 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="005dc9bc-563c-460c-9c82-2203b2512a69" containerName="init" Nov 29 04:34:02 crc kubenswrapper[4631]: E1129 04:34:02.434480 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="005dc9bc-563c-460c-9c82-2203b2512a69" containerName="dnsmasq-dns" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.434486 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="005dc9bc-563c-460c-9c82-2203b2512a69" containerName="dnsmasq-dns" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.434653 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerName="nova-api-api" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.434675 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff98f2fb-56df-43db-92af-f5e6bf57f302" containerName="nova-api-log" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.434689 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="005dc9bc-563c-460c-9c82-2203b2512a69" containerName="dnsmasq-dns" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.435730 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.439349 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.439537 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.439696 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.457577 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.559082 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52e9bc32-6412-4929-be28-61ac7021c100-logs\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.559138 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-config-data\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.559164 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4k4mq\" (UniqueName: \"kubernetes.io/projected/52e9bc32-6412-4929-be28-61ac7021c100-kube-api-access-4k4mq\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.559201 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.559278 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-public-tls-certs\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.559438 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-internal-tls-certs\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.640506 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": read tcp 10.217.0.2:42160->10.217.0.193:8775: read: connection reset by peer" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.641096 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-log" 
probeResult="failure" output="Get \"https://10.217.0.193:8775/\": read tcp 10.217.0.2:42152->10.217.0.193:8775: read: connection reset by peer" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.661372 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52e9bc32-6412-4929-be28-61ac7021c100-logs\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.661442 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-config-data\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.661475 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4k4mq\" (UniqueName: \"kubernetes.io/projected/52e9bc32-6412-4929-be28-61ac7021c100-kube-api-access-4k4mq\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.661516 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.661574 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-public-tls-certs\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.661613 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-internal-tls-certs\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.662019 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52e9bc32-6412-4929-be28-61ac7021c100-logs\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.666231 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-public-tls-certs\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.666362 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-internal-tls-certs\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.666520 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-combined-ca-bundle\") pod \"nova-api-0\" 
(UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.673679 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52e9bc32-6412-4929-be28-61ac7021c100-config-data\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.676006 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4k4mq\" (UniqueName: \"kubernetes.io/projected/52e9bc32-6412-4929-be28-61ac7021c100-kube-api-access-4k4mq\") pod \"nova-api-0\" (UID: \"52e9bc32-6412-4929-be28-61ac7021c100\") " pod="openstack/nova-api-0" Nov 29 04:34:02 crc kubenswrapper[4631]: I1129 04:34:02.758615 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.051613 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.169364 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae1fc503-ebc1-4261-b4b8-ee167b101e08-logs\") pod \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.169444 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkbtj\" (UniqueName: \"kubernetes.io/projected/ae1fc503-ebc1-4261-b4b8-ee167b101e08-kube-api-access-wkbtj\") pod \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.169576 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-combined-ca-bundle\") pod \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.169601 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-nova-metadata-tls-certs\") pod \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.169650 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-config-data\") pod \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\" (UID: \"ae1fc503-ebc1-4261-b4b8-ee167b101e08\") " Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.170391 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae1fc503-ebc1-4261-b4b8-ee167b101e08-logs" (OuterVolumeSpecName: "logs") pod "ae1fc503-ebc1-4261-b4b8-ee167b101e08" (UID: "ae1fc503-ebc1-4261-b4b8-ee167b101e08"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.179654 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae1fc503-ebc1-4261-b4b8-ee167b101e08-kube-api-access-wkbtj" (OuterVolumeSpecName: "kube-api-access-wkbtj") pod "ae1fc503-ebc1-4261-b4b8-ee167b101e08" (UID: "ae1fc503-ebc1-4261-b4b8-ee167b101e08"). InnerVolumeSpecName "kube-api-access-wkbtj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.216718 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae1fc503-ebc1-4261-b4b8-ee167b101e08" (UID: "ae1fc503-ebc1-4261-b4b8-ee167b101e08"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.217830 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "ae1fc503-ebc1-4261-b4b8-ee167b101e08" (UID: "ae1fc503-ebc1-4261-b4b8-ee167b101e08"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.225586 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-config-data" (OuterVolumeSpecName: "config-data") pod "ae1fc503-ebc1-4261-b4b8-ee167b101e08" (UID: "ae1fc503-ebc1-4261-b4b8-ee167b101e08"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.240607 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="005dc9bc-563c-460c-9c82-2203b2512a69" path="/var/lib/kubelet/pods/005dc9bc-563c-460c-9c82-2203b2512a69/volumes" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.241294 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff98f2fb-56df-43db-92af-f5e6bf57f302" path="/var/lib/kubelet/pods/ff98f2fb-56df-43db-92af-f5e6bf57f302/volumes" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.271434 4631 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae1fc503-ebc1-4261-b4b8-ee167b101e08-logs\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.271460 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkbtj\" (UniqueName: \"kubernetes.io/projected/ae1fc503-ebc1-4261-b4b8-ee167b101e08-kube-api-access-wkbtj\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.271472 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.271482 4631 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.271490 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae1fc503-ebc1-4261-b4b8-ee167b101e08-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.295110 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.346843 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"52e9bc32-6412-4929-be28-61ac7021c100","Type":"ContainerStarted","Data":"31219343af0bc34a2821fe6199e7b8600c7031947b50d7db2c1f143f3a20e695"} Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.355913 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7vh9h" event={"ID":"40e879cd-6972-40b3-9c4c-63c5404487b2","Type":"ContainerStarted","Data":"c2ecc7629be0682c7c613617a8f99b8f0064d68c787d06d8e70a2c6981809c38"} Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.364934 4631 generic.go:334] "Generic (PLEG): container finished" podID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerID="202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602" exitCode=0 Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.365026 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae1fc503-ebc1-4261-b4b8-ee167b101e08","Type":"ContainerDied","Data":"202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602"} Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.365052 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae1fc503-ebc1-4261-b4b8-ee167b101e08","Type":"ContainerDied","Data":"70118047aecb1a3458ccbcae466b93ba1c3f6acf5c67359ab81e890f6c3e9cca"} Nov 29 04:34:03 crc 
kubenswrapper[4631]: I1129 04:34:03.365077 4631 scope.go:117] "RemoveContainer" containerID="202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.365246 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.425623 4631 scope.go:117] "RemoveContainer" containerID="f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.442818 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.454030 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.469827 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:34:03 crc kubenswrapper[4631]: E1129 04:34:03.470234 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-log" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.470251 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-log" Nov 29 04:34:03 crc kubenswrapper[4631]: E1129 04:34:03.470267 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-metadata" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.470273 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-metadata" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.470468 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-log" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.470488 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" containerName="nova-metadata-metadata" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.471447 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.474074 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.474275 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.486631 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.492686 4631 scope.go:117] "RemoveContainer" containerID="202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602" Nov 29 04:34:03 crc kubenswrapper[4631]: E1129 04:34:03.498357 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602\": container with ID starting with 202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602 not found: ID does not exist" containerID="202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.498401 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602"} err="failed to get container status \"202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602\": rpc error: code = NotFound desc = could not find container \"202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602\": container with ID starting with 202bc7f228341f0570f5643982fef77394c9e9b43e692cd42dbc33134adda602 not found: ID does not exist" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.498430 4631 scope.go:117] "RemoveContainer" containerID="f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59" Nov 29 04:34:03 crc kubenswrapper[4631]: E1129 04:34:03.499463 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59\": container with ID starting with f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59 not found: ID does not exist" containerID="f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.499505 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59"} err="failed to get container status \"f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59\": rpc error: code = NotFound desc = could not find container \"f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59\": container with ID starting with f404663c79a51a5dbeaf8dc8c24268c0035763b2267e2648f0dac56e63d5ef59 not found: ID does not exist" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.578159 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5610ba4-568f-4281-90cd-7b4a187a9884-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.578210 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5610ba4-568f-4281-90cd-7b4a187a9884-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.578298 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5610ba4-568f-4281-90cd-7b4a187a9884-logs\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.578345 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5610ba4-568f-4281-90cd-7b4a187a9884-config-data\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.578383 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2rbk\" (UniqueName: \"kubernetes.io/projected/a5610ba4-568f-4281-90cd-7b4a187a9884-kube-api-access-r2rbk\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.680520 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5610ba4-568f-4281-90cd-7b4a187a9884-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.680774 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5610ba4-568f-4281-90cd-7b4a187a9884-logs\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.680918 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5610ba4-568f-4281-90cd-7b4a187a9884-config-data\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.681034 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2rbk\" (UniqueName: \"kubernetes.io/projected/a5610ba4-568f-4281-90cd-7b4a187a9884-kube-api-access-r2rbk\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.681190 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5610ba4-568f-4281-90cd-7b4a187a9884-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.681208 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5610ba4-568f-4281-90cd-7b4a187a9884-logs\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " 
pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.684724 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5610ba4-568f-4281-90cd-7b4a187a9884-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.685186 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5610ba4-568f-4281-90cd-7b4a187a9884-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.687821 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5610ba4-568f-4281-90cd-7b4a187a9884-config-data\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.698553 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2rbk\" (UniqueName: \"kubernetes.io/projected/a5610ba4-568f-4281-90cd-7b4a187a9884-kube-api-access-r2rbk\") pod \"nova-metadata-0\" (UID: \"a5610ba4-568f-4281-90cd-7b4a187a9884\") " pod="openstack/nova-metadata-0" Nov 29 04:34:03 crc kubenswrapper[4631]: I1129 04:34:03.791700 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 29 04:34:04 crc kubenswrapper[4631]: I1129 04:34:04.405175 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"52e9bc32-6412-4929-be28-61ac7021c100","Type":"ContainerStarted","Data":"ca9382b7f0f9c24d739b9c5179327ffdf986c19f72570f4b03223c8dd6a3b6e4"} Nov 29 04:34:04 crc kubenswrapper[4631]: I1129 04:34:04.410010 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"52e9bc32-6412-4929-be28-61ac7021c100","Type":"ContainerStarted","Data":"8a5c614420e1e3ececb4f14dd64059592f4041627bbfecea1459cedd8e696849"} Nov 29 04:34:04 crc kubenswrapper[4631]: I1129 04:34:04.645896 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.645872434 podStartE2EDuration="2.645872434s" podCreationTimestamp="2025-11-29 04:34:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:34:04.434665244 +0000 UTC m=+1371.499168818" watchObservedRunningTime="2025-11-29 04:34:04.645872434 +0000 UTC m=+1371.710375958" Nov 29 04:34:04 crc kubenswrapper[4631]: I1129 04:34:04.655137 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 29 04:34:05 crc kubenswrapper[4631]: I1129 04:34:05.235439 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae1fc503-ebc1-4261-b4b8-ee167b101e08" path="/var/lib/kubelet/pods/ae1fc503-ebc1-4261-b4b8-ee167b101e08/volumes" Nov 29 04:34:05 crc kubenswrapper[4631]: I1129 04:34:05.414640 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a5610ba4-568f-4281-90cd-7b4a187a9884","Type":"ContainerStarted","Data":"cbcf9d39ab81e82c85994af77b57db9d59a801ebaab347b041d3bca93f5fa3f4"} Nov 29 04:34:06 crc kubenswrapper[4631]: I1129 
04:34:06.430466 4631 generic.go:334] "Generic (PLEG): container finished" podID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerID="c2ecc7629be0682c7c613617a8f99b8f0064d68c787d06d8e70a2c6981809c38" exitCode=0 Nov 29 04:34:06 crc kubenswrapper[4631]: I1129 04:34:06.430564 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7vh9h" event={"ID":"40e879cd-6972-40b3-9c4c-63c5404487b2","Type":"ContainerDied","Data":"c2ecc7629be0682c7c613617a8f99b8f0064d68c787d06d8e70a2c6981809c38"} Nov 29 04:34:06 crc kubenswrapper[4631]: I1129 04:34:06.439033 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a5610ba4-568f-4281-90cd-7b4a187a9884","Type":"ContainerStarted","Data":"476fcdc046f08a39f1c2d1b8d45a4dbe6294ca111e0c179033acb88092ac8089"} Nov 29 04:34:07 crc kubenswrapper[4631]: I1129 04:34:07.451912 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a5610ba4-568f-4281-90cd-7b4a187a9884","Type":"ContainerStarted","Data":"b5f83e968f26e8e8fe0a041f00457bc246d6461f5509047249635bb2cd204e89"} Nov 29 04:34:07 crc kubenswrapper[4631]: I1129 04:34:07.497604 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.497567088 podStartE2EDuration="4.497567088s" podCreationTimestamp="2025-11-29 04:34:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:34:07.480178401 +0000 UTC m=+1374.544681945" watchObservedRunningTime="2025-11-29 04:34:07.497567088 +0000 UTC m=+1374.562070632" Nov 29 04:34:08 crc kubenswrapper[4631]: I1129 04:34:08.470523 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7vh9h" event={"ID":"40e879cd-6972-40b3-9c4c-63c5404487b2","Type":"ContainerStarted","Data":"327d9bd07e47cce7a30e94aafe53f69e85f72d046d226fe2c24490ed612194c9"} Nov 29 04:34:08 crc kubenswrapper[4631]: I1129 04:34:08.500182 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7vh9h" podStartSLOduration=3.04286229 podStartE2EDuration="9.500153603s" podCreationTimestamp="2025-11-29 04:33:59 +0000 UTC" firstStartedPulling="2025-11-29 04:34:01.286444414 +0000 UTC m=+1368.350947928" lastFinishedPulling="2025-11-29 04:34:07.743735707 +0000 UTC m=+1374.808239241" observedRunningTime="2025-11-29 04:34:08.496674738 +0000 UTC m=+1375.561178262" watchObservedRunningTime="2025-11-29 04:34:08.500153603 +0000 UTC m=+1375.564657157" Nov 29 04:34:08 crc kubenswrapper[4631]: I1129 04:34:08.792273 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 29 04:34:08 crc kubenswrapper[4631]: I1129 04:34:08.792377 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 29 04:34:10 crc kubenswrapper[4631]: I1129 04:34:10.130238 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:34:10 crc kubenswrapper[4631]: I1129 04:34:10.130299 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:34:11 crc kubenswrapper[4631]: I1129 04:34:11.222010 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7vh9h" 
podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerName="registry-server" probeResult="failure" output=< Nov 29 04:34:11 crc kubenswrapper[4631]: timeout: failed to connect service ":50051" within 1s Nov 29 04:34:11 crc kubenswrapper[4631]: > Nov 29 04:34:12 crc kubenswrapper[4631]: I1129 04:34:12.759510 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 29 04:34:12 crc kubenswrapper[4631]: I1129 04:34:12.759944 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 29 04:34:13 crc kubenswrapper[4631]: I1129 04:34:13.778607 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="52e9bc32-6412-4929-be28-61ac7021c100" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 04:34:13 crc kubenswrapper[4631]: I1129 04:34:13.778617 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="52e9bc32-6412-4929-be28-61ac7021c100" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:34:13 crc kubenswrapper[4631]: I1129 04:34:13.793749 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 29 04:34:13 crc kubenswrapper[4631]: I1129 04:34:13.793988 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 29 04:34:15 crc kubenswrapper[4631]: I1129 04:34:15.047813 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a5610ba4-568f-4281-90cd-7b4a187a9884" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:34:15 crc kubenswrapper[4631]: I1129 04:34:15.048423 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a5610ba4-568f-4281-90cd-7b4a187a9884" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 29 04:34:20 crc kubenswrapper[4631]: I1129 04:34:20.203109 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:34:20 crc kubenswrapper[4631]: I1129 04:34:20.272452 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:34:20 crc kubenswrapper[4631]: I1129 04:34:20.457311 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7vh9h"] Nov 29 04:34:21 crc kubenswrapper[4631]: I1129 04:34:21.641087 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7vh9h" podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerName="registry-server" containerID="cri-o://327d9bd07e47cce7a30e94aafe53f69e85f72d046d226fe2c24490ed612194c9" gracePeriod=2 Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.656987 4631 generic.go:334] "Generic (PLEG): container finished" podID="40e879cd-6972-40b3-9c4c-63c5404487b2" 
containerID="327d9bd07e47cce7a30e94aafe53f69e85f72d046d226fe2c24490ed612194c9" exitCode=0 Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.657080 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7vh9h" event={"ID":"40e879cd-6972-40b3-9c4c-63c5404487b2","Type":"ContainerDied","Data":"327d9bd07e47cce7a30e94aafe53f69e85f72d046d226fe2c24490ed612194c9"} Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.657432 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7vh9h" event={"ID":"40e879cd-6972-40b3-9c4c-63c5404487b2","Type":"ContainerDied","Data":"068b7443eeae91cc5e6702ba7b655371e9991883bf761ad10a5565e59906154e"} Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.657457 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="068b7443eeae91cc5e6702ba7b655371e9991883bf761ad10a5565e59906154e" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.720968 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.771959 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.772033 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.772510 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.772577 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.785122 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.785535 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.843406 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-utilities\") pod \"40e879cd-6972-40b3-9c4c-63c5404487b2\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.843810 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-catalog-content\") pod \"40e879cd-6972-40b3-9c4c-63c5404487b2\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.843923 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4x42h\" (UniqueName: \"kubernetes.io/projected/40e879cd-6972-40b3-9c4c-63c5404487b2-kube-api-access-4x42h\") pod \"40e879cd-6972-40b3-9c4c-63c5404487b2\" (UID: \"40e879cd-6972-40b3-9c4c-63c5404487b2\") " Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.846267 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-utilities" (OuterVolumeSpecName: "utilities") pod "40e879cd-6972-40b3-9c4c-63c5404487b2" (UID: "40e879cd-6972-40b3-9c4c-63c5404487b2"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.909477 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40e879cd-6972-40b3-9c4c-63c5404487b2-kube-api-access-4x42h" (OuterVolumeSpecName: "kube-api-access-4x42h") pod "40e879cd-6972-40b3-9c4c-63c5404487b2" (UID: "40e879cd-6972-40b3-9c4c-63c5404487b2"). InnerVolumeSpecName "kube-api-access-4x42h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.945881 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4x42h\" (UniqueName: \"kubernetes.io/projected/40e879cd-6972-40b3-9c4c-63c5404487b2-kube-api-access-4x42h\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:22 crc kubenswrapper[4631]: I1129 04:34:22.945914 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:23 crc kubenswrapper[4631]: I1129 04:34:23.020280 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "40e879cd-6972-40b3-9c4c-63c5404487b2" (UID: "40e879cd-6972-40b3-9c4c-63c5404487b2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:34:23 crc kubenswrapper[4631]: I1129 04:34:23.046501 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40e879cd-6972-40b3-9c4c-63c5404487b2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:23 crc kubenswrapper[4631]: I1129 04:34:23.669096 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7vh9h" Nov 29 04:34:23 crc kubenswrapper[4631]: I1129 04:34:23.702449 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7vh9h"] Nov 29 04:34:23 crc kubenswrapper[4631]: I1129 04:34:23.713684 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7vh9h"] Nov 29 04:34:23 crc kubenswrapper[4631]: I1129 04:34:23.798420 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 29 04:34:23 crc kubenswrapper[4631]: I1129 04:34:23.799319 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 29 04:34:23 crc kubenswrapper[4631]: I1129 04:34:23.814948 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 29 04:34:24 crc kubenswrapper[4631]: I1129 04:34:24.689361 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 29 04:34:25 crc kubenswrapper[4631]: I1129 04:34:25.230973 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" path="/var/lib/kubelet/pods/40e879cd-6972-40b3-9c4c-63c5404487b2/volumes" Nov 29 04:34:25 crc kubenswrapper[4631]: I1129 04:34:25.854009 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.711730 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.736570 4631 generic.go:334] "Generic (PLEG): container finished" podID="08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0" containerID="549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46" exitCode=137 Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.736615 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0","Type":"ContainerDied","Data":"549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46"} Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.736642 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0","Type":"ContainerDied","Data":"a052958a8482f8464db75630693d98338ed1841c164cb2159cf6bf640757a3a4"} Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.736659 4631 scope.go:117] "RemoveContainer" containerID="549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46" Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.736994 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.755116 4631 scope.go:117] "RemoveContainer" containerID="549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46" Nov 29 04:34:30 crc kubenswrapper[4631]: E1129 04:34:30.755523 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46\": container with ID starting with 549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46 not found: ID does not exist" containerID="549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46" Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.755553 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46"} err="failed to get container status \"549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46\": rpc error: code = NotFound desc = could not find container \"549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46\": container with ID starting with 549a24213cee537523a5fed8630e043b89f5f7ca837cdd8983f37cec347d7c46 not found: ID does not exist" Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.913061 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-combined-ca-bundle\") pod \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.913304 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-config-data\") pod \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\" (UID: \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.913394 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nzhn\" (UniqueName: \"kubernetes.io/projected/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-kube-api-access-4nzhn\") pod \"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\" (UID: 
\"08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0\") " Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.922933 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-kube-api-access-4nzhn" (OuterVolumeSpecName: "kube-api-access-4nzhn") pod "08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0" (UID: "08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0"). InnerVolumeSpecName "kube-api-access-4nzhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.938581 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-config-data" (OuterVolumeSpecName: "config-data") pod "08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0" (UID: "08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:34:30 crc kubenswrapper[4631]: I1129 04:34:30.954502 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0" (UID: "08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.014649 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.014676 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.014685 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nzhn\" (UniqueName: \"kubernetes.io/projected/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0-kube-api-access-4nzhn\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.064561 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.073103 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.089644 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:34:31 crc kubenswrapper[4631]: E1129 04:34:31.089991 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerName="extract-utilities" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.090007 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerName="extract-utilities" Nov 29 04:34:31 crc kubenswrapper[4631]: E1129 04:34:31.090023 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerName="registry-server" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.090029 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerName="registry-server" Nov 29 04:34:31 crc kubenswrapper[4631]: E1129 04:34:31.090067 4631 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0" containerName="nova-scheduler-scheduler" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.090074 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0" containerName="nova-scheduler-scheduler" Nov 29 04:34:31 crc kubenswrapper[4631]: E1129 04:34:31.090081 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerName="extract-content" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.090088 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerName="extract-content" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.090260 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="40e879cd-6972-40b3-9c4c-63c5404487b2" containerName="registry-server" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.090281 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0" containerName="nova-scheduler-scheduler" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.090871 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.093916 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.098285 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.228365 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0" path="/var/lib/kubelet/pods/08c1e5d6-e5ee-4c27-a6e5-75bf9497a2e0/volumes" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.259814 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6926c23d-5598-420e-b328-c95ffe4d1475-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6926c23d-5598-420e-b328-c95ffe4d1475\") " pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.260287 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbpdr\" (UniqueName: \"kubernetes.io/projected/6926c23d-5598-420e-b328-c95ffe4d1475-kube-api-access-lbpdr\") pod \"nova-scheduler-0\" (UID: \"6926c23d-5598-420e-b328-c95ffe4d1475\") " pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.260313 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6926c23d-5598-420e-b328-c95ffe4d1475-config-data\") pod \"nova-scheduler-0\" (UID: \"6926c23d-5598-420e-b328-c95ffe4d1475\") " pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.363275 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6926c23d-5598-420e-b328-c95ffe4d1475-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6926c23d-5598-420e-b328-c95ffe4d1475\") " pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.364264 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-lbpdr\" (UniqueName: \"kubernetes.io/projected/6926c23d-5598-420e-b328-c95ffe4d1475-kube-api-access-lbpdr\") pod \"nova-scheduler-0\" (UID: \"6926c23d-5598-420e-b328-c95ffe4d1475\") " pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.364423 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6926c23d-5598-420e-b328-c95ffe4d1475-config-data\") pod \"nova-scheduler-0\" (UID: \"6926c23d-5598-420e-b328-c95ffe4d1475\") " pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.370984 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6926c23d-5598-420e-b328-c95ffe4d1475-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6926c23d-5598-420e-b328-c95ffe4d1475\") " pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.378766 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6926c23d-5598-420e-b328-c95ffe4d1475-config-data\") pod \"nova-scheduler-0\" (UID: \"6926c23d-5598-420e-b328-c95ffe4d1475\") " pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.386081 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbpdr\" (UniqueName: \"kubernetes.io/projected/6926c23d-5598-420e-b328-c95ffe4d1475-kube-api-access-lbpdr\") pod \"nova-scheduler-0\" (UID: \"6926c23d-5598-420e-b328-c95ffe4d1475\") " pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.464193 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 29 04:34:31 crc kubenswrapper[4631]: W1129 04:34:31.746274 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6926c23d_5598_420e_b328_c95ffe4d1475.slice/crio-66afea99fdb52dd6580c25b57e4783aaf41fe12c7223061055e6f8bd957aeda0 WatchSource:0}: Error finding container 66afea99fdb52dd6580c25b57e4783aaf41fe12c7223061055e6f8bd957aeda0: Status 404 returned error can't find the container with id 66afea99fdb52dd6580c25b57e4783aaf41fe12c7223061055e6f8bd957aeda0 Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.746510 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.829468 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7rzhg"] Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.834170 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.845929 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7rzhg"] Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.973947 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-catalog-content\") pod \"community-operators-7rzhg\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.974000 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-utilities\") pod \"community-operators-7rzhg\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:31 crc kubenswrapper[4631]: I1129 04:34:31.974019 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdtc9\" (UniqueName: \"kubernetes.io/projected/182dca6d-47ba-4896-8e5c-418585e96c46-kube-api-access-gdtc9\") pod \"community-operators-7rzhg\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.076307 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-catalog-content\") pod \"community-operators-7rzhg\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.076374 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-utilities\") pod \"community-operators-7rzhg\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.076391 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdtc9\" (UniqueName: \"kubernetes.io/projected/182dca6d-47ba-4896-8e5c-418585e96c46-kube-api-access-gdtc9\") pod \"community-operators-7rzhg\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.076970 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-catalog-content\") pod \"community-operators-7rzhg\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.077303 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-utilities\") pod \"community-operators-7rzhg\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.106060 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gdtc9\" (UniqueName: \"kubernetes.io/projected/182dca6d-47ba-4896-8e5c-418585e96c46-kube-api-access-gdtc9\") pod \"community-operators-7rzhg\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.163277 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.549454 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7rzhg"] Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.761608 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rzhg" event={"ID":"182dca6d-47ba-4896-8e5c-418585e96c46","Type":"ContainerStarted","Data":"4c0311c42704741c74aed961a71c3b2158f744f493bbdead3705ee42584b1bea"} Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.764119 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6926c23d-5598-420e-b328-c95ffe4d1475","Type":"ContainerStarted","Data":"21e22a072ac898ebcb3213a3939f559bd759ec3fa878348659224420a7f3299e"} Nov 29 04:34:32 crc kubenswrapper[4631]: I1129 04:34:32.764144 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6926c23d-5598-420e-b328-c95ffe4d1475","Type":"ContainerStarted","Data":"66afea99fdb52dd6580c25b57e4783aaf41fe12c7223061055e6f8bd957aeda0"} Nov 29 04:34:33 crc kubenswrapper[4631]: I1129 04:34:33.274114 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.274095268 podStartE2EDuration="2.274095268s" podCreationTimestamp="2025-11-29 04:34:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:34:32.783666378 +0000 UTC m=+1399.848169892" watchObservedRunningTime="2025-11-29 04:34:33.274095268 +0000 UTC m=+1400.338598792" Nov 29 04:34:33 crc kubenswrapper[4631]: I1129 04:34:33.801094 4631 generic.go:334] "Generic (PLEG): container finished" podID="182dca6d-47ba-4896-8e5c-418585e96c46" containerID="c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8" exitCode=0 Nov 29 04:34:33 crc kubenswrapper[4631]: I1129 04:34:33.802507 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rzhg" event={"ID":"182dca6d-47ba-4896-8e5c-418585e96c46","Type":"ContainerDied","Data":"c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8"} Nov 29 04:34:34 crc kubenswrapper[4631]: I1129 04:34:34.810163 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rzhg" event={"ID":"182dca6d-47ba-4896-8e5c-418585e96c46","Type":"ContainerStarted","Data":"1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0"} Nov 29 04:34:35 crc kubenswrapper[4631]: I1129 04:34:35.827148 4631 generic.go:334] "Generic (PLEG): container finished" podID="182dca6d-47ba-4896-8e5c-418585e96c46" containerID="1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0" exitCode=0 Nov 29 04:34:35 crc kubenswrapper[4631]: I1129 04:34:35.827251 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rzhg" 
event={"ID":"182dca6d-47ba-4896-8e5c-418585e96c46","Type":"ContainerDied","Data":"1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0"} Nov 29 04:34:36 crc kubenswrapper[4631]: I1129 04:34:36.465710 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 29 04:34:36 crc kubenswrapper[4631]: I1129 04:34:36.844389 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rzhg" event={"ID":"182dca6d-47ba-4896-8e5c-418585e96c46","Type":"ContainerStarted","Data":"c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24"} Nov 29 04:34:36 crc kubenswrapper[4631]: I1129 04:34:36.879121 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7rzhg" podStartSLOduration=3.112482708 podStartE2EDuration="5.879090622s" podCreationTimestamp="2025-11-29 04:34:31 +0000 UTC" firstStartedPulling="2025-11-29 04:34:33.807046194 +0000 UTC m=+1400.871549718" lastFinishedPulling="2025-11-29 04:34:36.573654108 +0000 UTC m=+1403.638157632" observedRunningTime="2025-11-29 04:34:36.864827641 +0000 UTC m=+1403.929331165" watchObservedRunningTime="2025-11-29 04:34:36.879090622 +0000 UTC m=+1403.943594146" Nov 29 04:34:41 crc kubenswrapper[4631]: I1129 04:34:41.465603 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 29 04:34:41 crc kubenswrapper[4631]: I1129 04:34:41.514279 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 29 04:34:41 crc kubenswrapper[4631]: I1129 04:34:41.940602 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 29 04:34:42 crc kubenswrapper[4631]: I1129 04:34:42.163722 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:42 crc kubenswrapper[4631]: I1129 04:34:42.163777 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:42 crc kubenswrapper[4631]: I1129 04:34:42.217644 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:42 crc kubenswrapper[4631]: I1129 04:34:42.953696 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:43 crc kubenswrapper[4631]: I1129 04:34:43.002962 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7rzhg"] Nov 29 04:34:44 crc kubenswrapper[4631]: I1129 04:34:44.919065 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7rzhg" podUID="182dca6d-47ba-4896-8e5c-418585e96c46" containerName="registry-server" containerID="cri-o://c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24" gracePeriod=2 Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.329938 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.451494 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdtc9\" (UniqueName: \"kubernetes.io/projected/182dca6d-47ba-4896-8e5c-418585e96c46-kube-api-access-gdtc9\") pod \"182dca6d-47ba-4896-8e5c-418585e96c46\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.451552 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-catalog-content\") pod \"182dca6d-47ba-4896-8e5c-418585e96c46\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.451726 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-utilities\") pod \"182dca6d-47ba-4896-8e5c-418585e96c46\" (UID: \"182dca6d-47ba-4896-8e5c-418585e96c46\") " Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.452639 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-utilities" (OuterVolumeSpecName: "utilities") pod "182dca6d-47ba-4896-8e5c-418585e96c46" (UID: "182dca6d-47ba-4896-8e5c-418585e96c46"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.456906 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/182dca6d-47ba-4896-8e5c-418585e96c46-kube-api-access-gdtc9" (OuterVolumeSpecName: "kube-api-access-gdtc9") pod "182dca6d-47ba-4896-8e5c-418585e96c46" (UID: "182dca6d-47ba-4896-8e5c-418585e96c46"). InnerVolumeSpecName "kube-api-access-gdtc9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.501093 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "182dca6d-47ba-4896-8e5c-418585e96c46" (UID: "182dca6d-47ba-4896-8e5c-418585e96c46"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.553425 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.553461 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdtc9\" (UniqueName: \"kubernetes.io/projected/182dca6d-47ba-4896-8e5c-418585e96c46-kube-api-access-gdtc9\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.553474 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/182dca6d-47ba-4896-8e5c-418585e96c46-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.930730 4631 generic.go:334] "Generic (PLEG): container finished" podID="182dca6d-47ba-4896-8e5c-418585e96c46" containerID="c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24" exitCode=0 Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.930785 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7rzhg" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.930820 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rzhg" event={"ID":"182dca6d-47ba-4896-8e5c-418585e96c46","Type":"ContainerDied","Data":"c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24"} Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.931824 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rzhg" event={"ID":"182dca6d-47ba-4896-8e5c-418585e96c46","Type":"ContainerDied","Data":"4c0311c42704741c74aed961a71c3b2158f744f493bbdead3705ee42584b1bea"} Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.931846 4631 scope.go:117] "RemoveContainer" containerID="c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.985449 4631 scope.go:117] "RemoveContainer" containerID="1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0" Nov 29 04:34:45 crc kubenswrapper[4631]: I1129 04:34:45.995977 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7rzhg"] Nov 29 04:34:46 crc kubenswrapper[4631]: I1129 04:34:46.024467 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7rzhg"] Nov 29 04:34:46 crc kubenswrapper[4631]: I1129 04:34:46.048509 4631 scope.go:117] "RemoveContainer" containerID="c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8" Nov 29 04:34:46 crc kubenswrapper[4631]: I1129 04:34:46.076974 4631 scope.go:117] "RemoveContainer" containerID="c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24" Nov 29 04:34:46 crc kubenswrapper[4631]: E1129 04:34:46.079884 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24\": container with ID starting with c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24 not found: ID does not exist" containerID="c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24" Nov 29 04:34:46 crc kubenswrapper[4631]: I1129 04:34:46.079946 
Nov 29 04:34:46 crc kubenswrapper[4631]: I1129 04:34:46.079946 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24"} err="failed to get container status \"c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24\": rpc error: code = NotFound desc = could not find container \"c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24\": container with ID starting with c5e978ddebfdb42a4bc7471c459f81517d99c1f2821cd40444361b87d98e7c24 not found: ID does not exist"
Nov 29 04:34:46 crc kubenswrapper[4631]: I1129 04:34:46.079974 4631 scope.go:117] "RemoveContainer" containerID="1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0"
Nov 29 04:34:46 crc kubenswrapper[4631]: E1129 04:34:46.080724 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0\": container with ID starting with 1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0 not found: ID does not exist" containerID="1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0"
Nov 29 04:34:46 crc kubenswrapper[4631]: I1129 04:34:46.080764 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0"} err="failed to get container status \"1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0\": rpc error: code = NotFound desc = could not find container \"1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0\": container with ID starting with 1c3afee6561d10f9d87fd21072639d70441935f5d97472a393e81c1ed8f985a0 not found: ID does not exist"
Nov 29 04:34:46 crc kubenswrapper[4631]: I1129 04:34:46.080783 4631 scope.go:117] "RemoveContainer" containerID="c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8"
Nov 29 04:34:46 crc kubenswrapper[4631]: E1129 04:34:46.081046 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8\": container with ID starting with c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8 not found: ID does not exist" containerID="c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8"
Nov 29 04:34:46 crc kubenswrapper[4631]: I1129 04:34:46.081073 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8"} err="failed to get container status \"c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8\": rpc error: code = NotFound desc = could not find container \"c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8\": container with ID starting with c01e93c461c53b13acbc7f245879946f2ad9a0e5e0b76a1c71443f89e40142b8 not found: ID does not exist"
Nov 29 04:34:47 crc kubenswrapper[4631]: I1129 04:34:47.235758 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="182dca6d-47ba-4896-8e5c-418585e96c46" path="/var/lib/kubelet/pods/182dca6d-47ba-4896-8e5c-418585e96c46/volumes"
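Note: the RemoveContainer/NotFound pairs above are the normal race on pod deletion: the containers were already removed, so the follow-up ContainerStatus RPC to CRI-O fails with gRPC code NotFound and the kubelet merely logs it. A minimal sketch of that error-handling pattern, assuming the standard gRPC status package (this is not the kubelet's actual code, and removeIfGone is a made-up helper):

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // removeIfGone treats a NotFound status from the runtime as success:
    // the container is already gone, so there is nothing left to delete.
    func removeIfGone(statusErr error) error {
    	if status.Code(statusErr) == codes.NotFound {
    		return nil
    	}
    	return statusErr
    }

    func main() {
    	// Stand-in for the error a ContainerStatus call returned above.
    	err := status.Error(codes.NotFound, `could not find container "c5e978dd..."`)
    	fmt.Println(removeIfGone(err)) // <nil>: already removed
    }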
\"http://10.217.0.71:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 29 04:34:50 crc kubenswrapper[4631]: I1129 04:34:50.715957 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:34:50 crc kubenswrapper[4631]: I1129 04:34:50.716357 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:34:52 crc kubenswrapper[4631]: I1129 04:34:52.296612 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 04:34:53 crc kubenswrapper[4631]: I1129 04:34:53.258692 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 04:34:56 crc kubenswrapper[4631]: I1129 04:34:56.701997 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" containerName="rabbitmq" containerID="cri-o://96b93f6eadde751b36470a5b4c837bf00d52b3177d3155bb316b665bf4fea62e" gracePeriod=604796 Nov 29 04:34:57 crc kubenswrapper[4631]: I1129 04:34:57.544587 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" containerName="rabbitmq" containerID="cri-o://afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d" gracePeriod=604796 Nov 29 04:35:02 crc kubenswrapper[4631]: I1129 04:35:02.158133 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 29 04:35:02 crc kubenswrapper[4631]: I1129 04:35:02.527245 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.130405 4631 generic.go:334] "Generic (PLEG): container finished" podID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" containerID="96b93f6eadde751b36470a5b4c837bf00d52b3177d3155bb316b665bf4fea62e" exitCode=0 Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.130480 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fdd7deaa-61f9-48f4-96c2-6d10d8df4192","Type":"ContainerDied","Data":"96b93f6eadde751b36470a5b4c837bf00d52b3177d3155bb316b665bf4fea62e"} Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.321500 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.481727 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-plugins\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482159 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-plugins-conf\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482266 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-tls\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482301 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-server-conf\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482509 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-confd\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482600 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-erlang-cookie-secret\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482641 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-config-data\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482790 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482817 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-pod-info\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482837 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). 
InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482842 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jnjb\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-kube-api-access-5jnjb\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482907 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-erlang-cookie\") pod \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\" (UID: \"fdd7deaa-61f9-48f4-96c2-6d10d8df4192\") " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.482996 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.483525 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.483551 4631 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.483568 4631 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.488536 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.489146 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-pod-info" (OuterVolumeSpecName: "pod-info") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.490547 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-kube-api-access-5jnjb" (OuterVolumeSpecName: "kube-api-access-5jnjb") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "kube-api-access-5jnjb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.490903 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.492314 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.554886 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-config-data" (OuterVolumeSpecName: "config-data") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.575656 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-server-conf" (OuterVolumeSpecName: "server-conf") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.586386 4631 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.586416 4631 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-pod-info\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.586427 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jnjb\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-kube-api-access-5jnjb\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.586437 4631 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.586446 4631 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.586454 4631 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-server-conf\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.586463 4631 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" 
(UniqueName: \"kubernetes.io/secret/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.586471 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.606094 4631 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.634542 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "fdd7deaa-61f9-48f4-96c2-6d10d8df4192" (UID: "fdd7deaa-61f9-48f4-96c2-6d10d8df4192"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.687557 4631 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:03 crc kubenswrapper[4631]: I1129 04:35:03.687585 4631 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fdd7deaa-61f9-48f4-96c2-6d10d8df4192-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.121891 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.151469 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fdd7deaa-61f9-48f4-96c2-6d10d8df4192","Type":"ContainerDied","Data":"3d3ed012a1a820aa2db6ffc3a0ac8b21f468449854fcdada90ed8cf97c7b028c"} Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.151519 4631 scope.go:117] "RemoveContainer" containerID="96b93f6eadde751b36470a5b4c837bf00d52b3177d3155bb316b665bf4fea62e" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.151661 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.200511 4631 generic.go:334] "Generic (PLEG): container finished" podID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" containerID="afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d" exitCode=0 Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.200553 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6","Type":"ContainerDied","Data":"afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d"} Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.200581 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6","Type":"ContainerDied","Data":"2dd0cf35aa5f76e54f1e1dbc2d43a400a10ec66174deda4cafe39c895fd02e5c"} Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.200669 4631 scope.go:117] "RemoveContainer" containerID="43c5e7fb8065a2ab8e17fc600e071e2631051fa004cfa2210ac324d8928cdde9" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.200755 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.231403 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.264665 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.278490 4631 scope.go:117] "RemoveContainer" containerID="afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.296863 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-config-data\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.296922 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dgpk\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-kube-api-access-2dgpk\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.296973 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.297048 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-tls\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.297083 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-server-conf\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: 
I1129 04:35:04.297125 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-erlang-cookie-secret\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.297160 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-erlang-cookie\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.297197 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-pod-info\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.297226 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-plugins-conf\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.297267 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-confd\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.297406 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-plugins\") pod \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\" (UID: \"566c1b50-db6a-48c9-8d7b-171ac4cdcaf6\") " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.298881 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.314489 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.315891 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.337472 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 04:35:04 crc kubenswrapper[4631]: E1129 04:35:04.337810 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182dca6d-47ba-4896-8e5c-418585e96c46" containerName="extract-utilities" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.337820 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="182dca6d-47ba-4896-8e5c-418585e96c46" containerName="extract-utilities" Nov 29 04:35:04 crc kubenswrapper[4631]: E1129 04:35:04.337831 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" containerName="setup-container" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.337836 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" containerName="setup-container" Nov 29 04:35:04 crc kubenswrapper[4631]: E1129 04:35:04.337853 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" containerName="rabbitmq" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.337861 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" containerName="rabbitmq" Nov 29 04:35:04 crc kubenswrapper[4631]: E1129 04:35:04.337876 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" containerName="rabbitmq" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.337882 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" containerName="rabbitmq" Nov 29 04:35:04 crc kubenswrapper[4631]: E1129 04:35:04.337893 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" containerName="setup-container" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.337899 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" containerName="setup-container" Nov 29 04:35:04 crc kubenswrapper[4631]: E1129 04:35:04.337909 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182dca6d-47ba-4896-8e5c-418585e96c46" containerName="registry-server" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.337915 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="182dca6d-47ba-4896-8e5c-418585e96c46" containerName="registry-server" Nov 29 04:35:04 crc kubenswrapper[4631]: E1129 04:35:04.337931 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182dca6d-47ba-4896-8e5c-418585e96c46" containerName="extract-content" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.337936 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="182dca6d-47ba-4896-8e5c-418585e96c46" containerName="extract-content" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.338099 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" containerName="rabbitmq" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.338111 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="182dca6d-47ba-4896-8e5c-418585e96c46" containerName="registry-server" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.338121 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" 
containerName="rabbitmq" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.351413 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.356705 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-kube-api-access-2dgpk" (OuterVolumeSpecName: "kube-api-access-2dgpk") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "kube-api-access-2dgpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.356997 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.357123 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-tfvdm" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.357258 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.357373 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.357467 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.359902 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.365000 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.372747 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.372814 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.377730 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "persistence") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.385257 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-pod-info" (OuterVolumeSpecName: "pod-info") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.387046 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.401636 4631 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.401659 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dgpk\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-kube-api-access-2dgpk\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.401678 4631 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.401688 4631 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.401699 4631 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.401708 4631 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.401716 4631 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-pod-info\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.401724 4631 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.488482 4631 scope.go:117] "RemoveContainer" containerID="5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.499829 4631 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502655 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58d4504e-5552-4a9b-8ef8-6442b51cccf1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502685 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502710 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58d4504e-5552-4a9b-8ef8-6442b51cccf1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502739 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502754 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58d4504e-5552-4a9b-8ef8-6442b51cccf1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502771 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502816 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502837 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502863 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5slk\" (UniqueName: \"kubernetes.io/projected/58d4504e-5552-4a9b-8ef8-6442b51cccf1-kube-api-access-s5slk\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502903 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58d4504e-5552-4a9b-8ef8-6442b51cccf1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.502920 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/58d4504e-5552-4a9b-8ef8-6442b51cccf1-config-data\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.503558 4631 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.522269 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-config-data" (OuterVolumeSpecName: "config-data") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.568012 4631 scope.go:117] "RemoveContainer" containerID="afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.570225 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-server-conf" (OuterVolumeSpecName: "server-conf") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: E1129 04:35:04.570474 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d\": container with ID starting with afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d not found: ID does not exist" containerID="afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.570585 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d"} err="failed to get container status \"afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d\": rpc error: code = NotFound desc = could not find container \"afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d\": container with ID starting with afe9d399b5cd43470fb6a4253295c874a8defddf589276f8b592fddc20c1266d not found: ID does not exist" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.570664 4631 scope.go:117] "RemoveContainer" containerID="5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396" Nov 29 04:35:04 crc kubenswrapper[4631]: E1129 04:35:04.577229 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396\": container with ID starting with 5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396 not found: ID does not exist" containerID="5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.577346 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396"} err="failed to get container status \"5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396\": rpc 
error: code = NotFound desc = could not find container \"5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396\": container with ID starting with 5bad335bedbe1a799753b09945d69cecf736307c9bdb8fc9383d73ac1575b396 not found: ID does not exist" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605493 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58d4504e-5552-4a9b-8ef8-6442b51cccf1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605528 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605552 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58d4504e-5552-4a9b-8ef8-6442b51cccf1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605577 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605592 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58d4504e-5552-4a9b-8ef8-6442b51cccf1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605608 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605642 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605663 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605690 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5slk\" (UniqueName: \"kubernetes.io/projected/58d4504e-5552-4a9b-8ef8-6442b51cccf1-kube-api-access-s5slk\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " 
pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605732 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58d4504e-5552-4a9b-8ef8-6442b51cccf1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605748 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/58d4504e-5552-4a9b-8ef8-6442b51cccf1-config-data\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605804 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.605815 4631 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-server-conf\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.606512 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/58d4504e-5552-4a9b-8ef8-6442b51cccf1-config-data\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.606510 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.607298 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.607869 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58d4504e-5552-4a9b-8ef8-6442b51cccf1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.608077 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.610672 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58d4504e-5552-4a9b-8ef8-6442b51cccf1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.621294 4631 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.624185 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58d4504e-5552-4a9b-8ef8-6442b51cccf1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.626284 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58d4504e-5552-4a9b-8ef8-6442b51cccf1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.631691 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58d4504e-5552-4a9b-8ef8-6442b51cccf1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.677913 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.703155 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5slk\" (UniqueName: \"kubernetes.io/projected/58d4504e-5552-4a9b-8ef8-6442b51cccf1-kube-api-access-s5slk\") pod \"rabbitmq-server-0\" (UID: \"58d4504e-5552-4a9b-8ef8-6442b51cccf1\") " pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.705677 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" (UID: "566c1b50-db6a-48c9-8d7b-171ac4cdcaf6"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.707140 4631 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.779993 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.844048 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.861113 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.870766 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.872781 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.875880 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.877789 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-7qszg" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.878018 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.878057 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.878155 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.878231 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.878401 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 29 04:35:04 crc kubenswrapper[4631]: I1129 04:35:04.884670 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.017674 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmhmp\" (UniqueName: \"kubernetes.io/projected/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-kube-api-access-kmhmp\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.017739 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.017773 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.017868 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.017970 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.018048 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.018144 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.018213 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.018269 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.018353 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.018383 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.120460 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.120762 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.120901 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.120903 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-kmhmp\" (UniqueName: \"kubernetes.io/projected/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-kube-api-access-kmhmp\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.120983 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.121024 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.121080 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.121163 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.121222 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.121348 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.121456 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.121471 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.121621 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-confd\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.121948 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.122163 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.122183 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.122409 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.139549 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.141923 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.142759 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.143721 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmhmp\" (UniqueName: \"kubernetes.io/projected/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-kube-api-access-kmhmp\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.146458 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/65e517ae-4586-44d4-b7d7-0f8f3f23e11f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.167461 4631 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"65e517ae-4586-44d4-b7d7-0f8f3f23e11f\") " pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.199766 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.317045 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="566c1b50-db6a-48c9-8d7b-171ac4cdcaf6" path="/var/lib/kubelet/pods/566c1b50-db6a-48c9-8d7b-171ac4cdcaf6/volumes" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.318107 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdd7deaa-61f9-48f4-96c2-6d10d8df4192" path="/var/lib/kubelet/pods/fdd7deaa-61f9-48f4-96c2-6d10d8df4192/volumes" Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.329309 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 29 04:35:05 crc kubenswrapper[4631]: I1129 04:35:05.795002 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 29 04:35:05 crc kubenswrapper[4631]: W1129 04:35:05.799547 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65e517ae_4586_44d4_b7d7_0f8f3f23e11f.slice/crio-e5c20986485397d971d8a7e396ff1768f91bd1c79a9b4114bc13417fbc1b5d05 WatchSource:0}: Error finding container e5c20986485397d971d8a7e396ff1768f91bd1c79a9b4114bc13417fbc1b5d05: Status 404 returned error can't find the container with id e5c20986485397d971d8a7e396ff1768f91bd1c79a9b4114bc13417fbc1b5d05 Nov 29 04:35:06 crc kubenswrapper[4631]: I1129 04:35:06.235084 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58d4504e-5552-4a9b-8ef8-6442b51cccf1","Type":"ContainerStarted","Data":"cd5d5df9da97d52636cd5d261a96a2a519994cca72f0dc5cdee1e1809cf97c8f"} Nov 29 04:35:06 crc kubenswrapper[4631]: I1129 04:35:06.236579 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"65e517ae-4586-44d4-b7d7-0f8f3f23e11f","Type":"ContainerStarted","Data":"e5c20986485397d971d8a7e396ff1768f91bd1c79a9b4114bc13417fbc1b5d05"} Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.167453 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-5bg48"] Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.170040 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.171945 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.182139 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-5bg48"] Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.247038 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"65e517ae-4586-44d4-b7d7-0f8f3f23e11f","Type":"ContainerStarted","Data":"34c54ddfc91ab812b3bae9a51a35e5b4f54001e3f75cdff030b26bef1f16adfb"} Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.249985 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58d4504e-5552-4a9b-8ef8-6442b51cccf1","Type":"ContainerStarted","Data":"19ebf45f05874be270c04c4d0cfba776d54744a2ee261f74ce250311708b2de8"} Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.264133 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.264190 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.264227 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.264253 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.264281 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.264348 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-config\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.264382 4631 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6xqh\" (UniqueName: \"kubernetes.io/projected/69411a25-f35d-49e5-a105-d7292ab7cea3-kube-api-access-v6xqh\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.365750 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-config\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.365832 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6xqh\" (UniqueName: \"kubernetes.io/projected/69411a25-f35d-49e5-a105-d7292ab7cea3-kube-api-access-v6xqh\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.365907 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.365978 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.366076 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.366122 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.366170 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.367575 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-config\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.370011 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.370389 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.370637 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.370733 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.370920 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.393468 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6xqh\" (UniqueName: \"kubernetes.io/projected/69411a25-f35d-49e5-a105-d7292ab7cea3-kube-api-access-v6xqh\") pod \"dnsmasq-dns-79bd4cc8c9-5bg48\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:07 crc kubenswrapper[4631]: I1129 04:35:07.493792 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:08 crc kubenswrapper[4631]: I1129 04:35:08.512973 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-5bg48"] Nov 29 04:35:09 crc kubenswrapper[4631]: I1129 04:35:09.268366 4631 generic.go:334] "Generic (PLEG): container finished" podID="69411a25-f35d-49e5-a105-d7292ab7cea3" containerID="8c20650aae4b535b7ad5920129b322f2a2c52cf23abf542614cc96d3fa201486" exitCode=0 Nov 29 04:35:09 crc kubenswrapper[4631]: I1129 04:35:09.268447 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" event={"ID":"69411a25-f35d-49e5-a105-d7292ab7cea3","Type":"ContainerDied","Data":"8c20650aae4b535b7ad5920129b322f2a2c52cf23abf542614cc96d3fa201486"} Nov 29 04:35:09 crc kubenswrapper[4631]: I1129 04:35:09.268663 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" event={"ID":"69411a25-f35d-49e5-a105-d7292ab7cea3","Type":"ContainerStarted","Data":"7dfd63e8122471575bd45ab31780babccef6f451658cdc4bad4269c1794ae385"} Nov 29 04:35:10 crc kubenswrapper[4631]: I1129 04:35:10.283219 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" event={"ID":"69411a25-f35d-49e5-a105-d7292ab7cea3","Type":"ContainerStarted","Data":"91d422b5ee149f378d3338ea6656feaf37150ab7fd8a4139db09aceb123332e5"} Nov 29 04:35:10 crc kubenswrapper[4631]: I1129 04:35:10.283488 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:10 crc kubenswrapper[4631]: I1129 04:35:10.320371 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" podStartSLOduration=3.320357025 podStartE2EDuration="3.320357025s" podCreationTimestamp="2025-11-29 04:35:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:35:10.310536484 +0000 UTC m=+1437.375039998" watchObservedRunningTime="2025-11-29 04:35:10.320357025 +0000 UTC m=+1437.384860539" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.496154 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.577436 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-mxxl2"] Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.577815 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" podUID="698c63b4-a1ab-4de8-aab5-b3a676703d4b" containerName="dnsmasq-dns" containerID="cri-o://9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234" gracePeriod=10 Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.755005 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-9mfkk"] Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.757562 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.771928 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-9mfkk"] Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.777843 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.777905 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbwsv\" (UniqueName: \"kubernetes.io/projected/9332abff-609f-424f-8f3c-b72b461489db-kube-api-access-mbwsv\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.777939 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-config\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.777963 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.777995 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.778040 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.778061 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.878713 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbwsv\" (UniqueName: \"kubernetes.io/projected/9332abff-609f-424f-8f3c-b72b461489db-kube-api-access-mbwsv\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.878768 4631 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-config\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.878794 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.878818 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.878896 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.878938 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.879043 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.880019 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.882442 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.882577 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.882892 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.883017 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.883353 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9332abff-609f-424f-8f3c-b72b461489db-config\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:17 crc kubenswrapper[4631]: I1129 04:35:17.928940 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbwsv\" (UniqueName: \"kubernetes.io/projected/9332abff-609f-424f-8f3c-b72b461489db-kube-api-access-mbwsv\") pod \"dnsmasq-dns-6cd9bffc9-9mfkk\" (UID: \"9332abff-609f-424f-8f3c-b72b461489db\") " pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.073929 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.096871 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.284509 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-nb\") pod \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.284864 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-sb\") pod \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.284894 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-svc\") pod \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.284976 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-config\") pod \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.285045 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-swift-storage-0\") pod \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.285087 4631 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44lfm\" (UniqueName: \"kubernetes.io/projected/698c63b4-a1ab-4de8-aab5-b3a676703d4b-kube-api-access-44lfm\") pod \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\" (UID: \"698c63b4-a1ab-4de8-aab5-b3a676703d4b\") " Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.303413 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/698c63b4-a1ab-4de8-aab5-b3a676703d4b-kube-api-access-44lfm" (OuterVolumeSpecName: "kube-api-access-44lfm") pod "698c63b4-a1ab-4de8-aab5-b3a676703d4b" (UID: "698c63b4-a1ab-4de8-aab5-b3a676703d4b"). InnerVolumeSpecName "kube-api-access-44lfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.340760 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "698c63b4-a1ab-4de8-aab5-b3a676703d4b" (UID: "698c63b4-a1ab-4de8-aab5-b3a676703d4b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.351700 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "698c63b4-a1ab-4de8-aab5-b3a676703d4b" (UID: "698c63b4-a1ab-4de8-aab5-b3a676703d4b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.355858 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "698c63b4-a1ab-4de8-aab5-b3a676703d4b" (UID: "698c63b4-a1ab-4de8-aab5-b3a676703d4b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.373066 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-config" (OuterVolumeSpecName: "config") pod "698c63b4-a1ab-4de8-aab5-b3a676703d4b" (UID: "698c63b4-a1ab-4de8-aab5-b3a676703d4b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.376863 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "698c63b4-a1ab-4de8-aab5-b3a676703d4b" (UID: "698c63b4-a1ab-4de8-aab5-b3a676703d4b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.383998 4631 generic.go:334] "Generic (PLEG): container finished" podID="698c63b4-a1ab-4de8-aab5-b3a676703d4b" containerID="9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234" exitCode=0 Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.384035 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" event={"ID":"698c63b4-a1ab-4de8-aab5-b3a676703d4b","Type":"ContainerDied","Data":"9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234"} Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.384060 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" event={"ID":"698c63b4-a1ab-4de8-aab5-b3a676703d4b","Type":"ContainerDied","Data":"55cdbc36d5b33a9c576321ea985a5213da182b2820ae7836bbad8bca5954b7ee"} Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.384075 4631 scope.go:117] "RemoveContainer" containerID="9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.384181 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-mxxl2" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.388703 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.388729 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.388739 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.388748 4631 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.388757 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44lfm\" (UniqueName: \"kubernetes.io/projected/698c63b4-a1ab-4de8-aab5-b3a676703d4b-kube-api-access-44lfm\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.388766 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/698c63b4-a1ab-4de8-aab5-b3a676703d4b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.410578 4631 scope.go:117] "RemoveContainer" containerID="ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.426432 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-mxxl2"] Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.433324 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-mxxl2"] Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.435951 4631 scope.go:117] "RemoveContainer" 
containerID="9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234" Nov 29 04:35:18 crc kubenswrapper[4631]: E1129 04:35:18.436988 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234\": container with ID starting with 9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234 not found: ID does not exist" containerID="9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.437018 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234"} err="failed to get container status \"9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234\": rpc error: code = NotFound desc = could not find container \"9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234\": container with ID starting with 9ae6e9e4d529774ccffac857f39c567406597be124da919b9fa79ca9e3a60234 not found: ID does not exist" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.437046 4631 scope.go:117] "RemoveContainer" containerID="ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a" Nov 29 04:35:18 crc kubenswrapper[4631]: E1129 04:35:18.437496 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a\": container with ID starting with ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a not found: ID does not exist" containerID="ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.437589 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a"} err="failed to get container status \"ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a\": rpc error: code = NotFound desc = could not find container \"ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a\": container with ID starting with ef5b485a4db11afc79a90b128127d205508551bb5836211651865ada8a89d12a not found: ID does not exist" Nov 29 04:35:18 crc kubenswrapper[4631]: I1129 04:35:18.580239 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-9mfkk"] Nov 29 04:35:19 crc kubenswrapper[4631]: I1129 04:35:19.225168 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="698c63b4-a1ab-4de8-aab5-b3a676703d4b" path="/var/lib/kubelet/pods/698c63b4-a1ab-4de8-aab5-b3a676703d4b/volumes" Nov 29 04:35:19 crc kubenswrapper[4631]: I1129 04:35:19.393966 4631 generic.go:334] "Generic (PLEG): container finished" podID="9332abff-609f-424f-8f3c-b72b461489db" containerID="f85826ae37bf48c61bab8221fb03094483cfaa07e25157f5263a780c20ffb14f" exitCode=0 Nov 29 04:35:19 crc kubenswrapper[4631]: I1129 04:35:19.394001 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" event={"ID":"9332abff-609f-424f-8f3c-b72b461489db","Type":"ContainerDied","Data":"f85826ae37bf48c61bab8221fb03094483cfaa07e25157f5263a780c20ffb14f"} Nov 29 04:35:19 crc kubenswrapper[4631]: I1129 04:35:19.394022 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" 
event={"ID":"9332abff-609f-424f-8f3c-b72b461489db","Type":"ContainerStarted","Data":"7f61d394716fbc421da5bfd1e0d359ee28fb7bc42394facd0e58880c7d3ff5c0"} Nov 29 04:35:20 crc kubenswrapper[4631]: I1129 04:35:20.405154 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" event={"ID":"9332abff-609f-424f-8f3c-b72b461489db","Type":"ContainerStarted","Data":"a63a559e5987363e223acf28078e58032f24282a05da644556f173375f00e27c"} Nov 29 04:35:20 crc kubenswrapper[4631]: I1129 04:35:20.406722 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:20 crc kubenswrapper[4631]: I1129 04:35:20.439977 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" podStartSLOduration=3.439961098 podStartE2EDuration="3.439961098s" podCreationTimestamp="2025-11-29 04:35:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:35:20.425423311 +0000 UTC m=+1447.489926845" watchObservedRunningTime="2025-11-29 04:35:20.439961098 +0000 UTC m=+1447.504464612" Nov 29 04:35:20 crc kubenswrapper[4631]: I1129 04:35:20.716223 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:35:20 crc kubenswrapper[4631]: I1129 04:35:20.716299 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:35:28 crc kubenswrapper[4631]: I1129 04:35:28.075536 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cd9bffc9-9mfkk" Nov 29 04:35:28 crc kubenswrapper[4631]: I1129 04:35:28.160196 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-5bg48"] Nov 29 04:35:28 crc kubenswrapper[4631]: I1129 04:35:28.160516 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" podUID="69411a25-f35d-49e5-a105-d7292ab7cea3" containerName="dnsmasq-dns" containerID="cri-o://91d422b5ee149f378d3338ea6656feaf37150ab7fd8a4139db09aceb123332e5" gracePeriod=10 Nov 29 04:35:28 crc kubenswrapper[4631]: I1129 04:35:28.513244 4631 generic.go:334] "Generic (PLEG): container finished" podID="69411a25-f35d-49e5-a105-d7292ab7cea3" containerID="91d422b5ee149f378d3338ea6656feaf37150ab7fd8a4139db09aceb123332e5" exitCode=0 Nov 29 04:35:28 crc kubenswrapper[4631]: I1129 04:35:28.513347 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" event={"ID":"69411a25-f35d-49e5-a105-d7292ab7cea3","Type":"ContainerDied","Data":"91d422b5ee149f378d3338ea6656feaf37150ab7fd8a4139db09aceb123332e5"} Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.171635 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.349069 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-nb\") pod \"69411a25-f35d-49e5-a105-d7292ab7cea3\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.349264 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6xqh\" (UniqueName: \"kubernetes.io/projected/69411a25-f35d-49e5-a105-d7292ab7cea3-kube-api-access-v6xqh\") pod \"69411a25-f35d-49e5-a105-d7292ab7cea3\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.349300 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-svc\") pod \"69411a25-f35d-49e5-a105-d7292ab7cea3\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.349371 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-sb\") pod \"69411a25-f35d-49e5-a105-d7292ab7cea3\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.349435 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-config\") pod \"69411a25-f35d-49e5-a105-d7292ab7cea3\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.349464 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-openstack-edpm-ipam\") pod \"69411a25-f35d-49e5-a105-d7292ab7cea3\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.349555 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-swift-storage-0\") pod \"69411a25-f35d-49e5-a105-d7292ab7cea3\" (UID: \"69411a25-f35d-49e5-a105-d7292ab7cea3\") " Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.370482 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69411a25-f35d-49e5-a105-d7292ab7cea3-kube-api-access-v6xqh" (OuterVolumeSpecName: "kube-api-access-v6xqh") pod "69411a25-f35d-49e5-a105-d7292ab7cea3" (UID: "69411a25-f35d-49e5-a105-d7292ab7cea3"). InnerVolumeSpecName "kube-api-access-v6xqh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.453008 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6xqh\" (UniqueName: \"kubernetes.io/projected/69411a25-f35d-49e5-a105-d7292ab7cea3-kube-api-access-v6xqh\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.506106 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "69411a25-f35d-49e5-a105-d7292ab7cea3" (UID: "69411a25-f35d-49e5-a105-d7292ab7cea3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.531818 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "69411a25-f35d-49e5-a105-d7292ab7cea3" (UID: "69411a25-f35d-49e5-a105-d7292ab7cea3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.552959 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-config" (OuterVolumeSpecName: "config") pod "69411a25-f35d-49e5-a105-d7292ab7cea3" (UID: "69411a25-f35d-49e5-a105-d7292ab7cea3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.554624 4631 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.554649 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.554659 4631 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-config\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.588225 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "69411a25-f35d-49e5-a105-d7292ab7cea3" (UID: "69411a25-f35d-49e5-a105-d7292ab7cea3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.588661 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" event={"ID":"69411a25-f35d-49e5-a105-d7292ab7cea3","Type":"ContainerDied","Data":"7dfd63e8122471575bd45ab31780babccef6f451658cdc4bad4269c1794ae385"} Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.588719 4631 scope.go:117] "RemoveContainer" containerID="91d422b5ee149f378d3338ea6656feaf37150ab7fd8a4139db09aceb123332e5" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.588924 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-5bg48" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.649756 4631 scope.go:117] "RemoveContainer" containerID="8c20650aae4b535b7ad5920129b322f2a2c52cf23abf542614cc96d3fa201486" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.649881 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "69411a25-f35d-49e5-a105-d7292ab7cea3" (UID: "69411a25-f35d-49e5-a105-d7292ab7cea3"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.650395 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "69411a25-f35d-49e5-a105-d7292ab7cea3" (UID: "69411a25-f35d-49e5-a105-d7292ab7cea3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.657712 4631 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.657802 4631 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.657854 4631 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/69411a25-f35d-49e5-a105-d7292ab7cea3-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.920057 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-5bg48"] Nov 29 04:35:29 crc kubenswrapper[4631]: I1129 04:35:29.926768 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-5bg48"] Nov 29 04:35:31 crc kubenswrapper[4631]: I1129 04:35:31.231365 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69411a25-f35d-49e5-a105-d7292ab7cea3" path="/var/lib/kubelet/pods/69411a25-f35d-49e5-a105-d7292ab7cea3/volumes" Nov 29 04:35:38 crc kubenswrapper[4631]: I1129 04:35:38.687840 4631 generic.go:334] "Generic (PLEG): container finished" podID="58d4504e-5552-4a9b-8ef8-6442b51cccf1" containerID="19ebf45f05874be270c04c4d0cfba776d54744a2ee261f74ce250311708b2de8" exitCode=0 Nov 29 04:35:38 crc kubenswrapper[4631]: I1129 04:35:38.687939 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58d4504e-5552-4a9b-8ef8-6442b51cccf1","Type":"ContainerDied","Data":"19ebf45f05874be270c04c4d0cfba776d54744a2ee261f74ce250311708b2de8"} Nov 29 04:35:39 crc kubenswrapper[4631]: I1129 04:35:39.698645 4631 generic.go:334] "Generic (PLEG): container finished" podID="65e517ae-4586-44d4-b7d7-0f8f3f23e11f" containerID="34c54ddfc91ab812b3bae9a51a35e5b4f54001e3f75cdff030b26bef1f16adfb" exitCode=0 Nov 29 04:35:39 crc kubenswrapper[4631]: I1129 04:35:39.698720 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"65e517ae-4586-44d4-b7d7-0f8f3f23e11f","Type":"ContainerDied","Data":"34c54ddfc91ab812b3bae9a51a35e5b4f54001e3f75cdff030b26bef1f16adfb"} Nov 29 04:35:39 crc kubenswrapper[4631]: I1129 04:35:39.702597 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58d4504e-5552-4a9b-8ef8-6442b51cccf1","Type":"ContainerStarted","Data":"b211372a40170e976b0dc8d204c898301b5efaa72a9785b9431eaae86f591838"} Nov 29 04:35:39 crc kubenswrapper[4631]: I1129 04:35:39.703062 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 29 04:35:39 crc kubenswrapper[4631]: I1129 04:35:39.764267 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=35.764249934 podStartE2EDuration="35.764249934s" podCreationTimestamp="2025-11-29 04:35:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:35:39.752187158 +0000 UTC m=+1466.816690702" watchObservedRunningTime="2025-11-29 04:35:39.764249934 +0000 UTC m=+1466.828753438" Nov 29 04:35:40 crc kubenswrapper[4631]: I1129 04:35:40.716242 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"65e517ae-4586-44d4-b7d7-0f8f3f23e11f","Type":"ContainerStarted","Data":"8250b50769fe2e92d6c342f73cf2844af56c241107b578fb6b3ec556c16f9221"} Nov 29 04:35:40 crc kubenswrapper[4631]: I1129 04:35:40.717371 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:35:40 crc kubenswrapper[4631]: I1129 04:35:40.748828 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.748808688 podStartE2EDuration="36.748808688s" podCreationTimestamp="2025-11-29 04:35:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 04:35:40.744069061 +0000 UTC m=+1467.808572575" watchObservedRunningTime="2025-11-29 04:35:40.748808688 +0000 UTC m=+1467.813312202" Nov 29 04:35:45 crc kubenswrapper[4631]: I1129 04:35:45.339872 4631 patch_prober.go:28] interesting pod/oauth-openshift-76f84477b-6nvlx container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.57:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 29 04:35:45 crc kubenswrapper[4631]: I1129 04:35:45.340493 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-76f84477b-6nvlx" podUID="12d84d2d-71c2-4d50-81b9-fa1f451b3fdf" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.57:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.546112 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d"] Nov 29 04:35:46 crc kubenswrapper[4631]: E1129 04:35:46.546676 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69411a25-f35d-49e5-a105-d7292ab7cea3" containerName="dnsmasq-dns" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.546687 4631 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="69411a25-f35d-49e5-a105-d7292ab7cea3" containerName="dnsmasq-dns" Nov 29 04:35:46 crc kubenswrapper[4631]: E1129 04:35:46.546702 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="698c63b4-a1ab-4de8-aab5-b3a676703d4b" containerName="init" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.546707 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="698c63b4-a1ab-4de8-aab5-b3a676703d4b" containerName="init" Nov 29 04:35:46 crc kubenswrapper[4631]: E1129 04:35:46.546721 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="698c63b4-a1ab-4de8-aab5-b3a676703d4b" containerName="dnsmasq-dns" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.546728 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="698c63b4-a1ab-4de8-aab5-b3a676703d4b" containerName="dnsmasq-dns" Nov 29 04:35:46 crc kubenswrapper[4631]: E1129 04:35:46.546750 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69411a25-f35d-49e5-a105-d7292ab7cea3" containerName="init" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.546757 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="69411a25-f35d-49e5-a105-d7292ab7cea3" containerName="init" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.546923 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="69411a25-f35d-49e5-a105-d7292ab7cea3" containerName="dnsmasq-dns" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.546936 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="698c63b4-a1ab-4de8-aab5-b3a676703d4b" containerName="dnsmasq-dns" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.547789 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.550880 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.551057 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.551160 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.551355 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.568639 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d"] Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.673028 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.673103 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.673190 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gss84\" (UniqueName: \"kubernetes.io/projected/811129e9-11e0-4619-b4ca-1779ce7ae461-kube-api-access-gss84\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.673520 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.775844 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gss84\" (UniqueName: \"kubernetes.io/projected/811129e9-11e0-4619-b4ca-1779ce7ae461-kube-api-access-gss84\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.775940 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.776013 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.776057 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.782312 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.786381 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: 
\"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.787017 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.805459 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gss84\" (UniqueName: \"kubernetes.io/projected/811129e9-11e0-4619-b4ca-1779ce7ae461-kube-api-access-gss84\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:46 crc kubenswrapper[4631]: I1129 04:35:46.863556 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:35:47 crc kubenswrapper[4631]: I1129 04:35:47.476177 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d"] Nov 29 04:35:47 crc kubenswrapper[4631]: I1129 04:35:47.783455 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" event={"ID":"811129e9-11e0-4619-b4ca-1779ce7ae461","Type":"ContainerStarted","Data":"832d055c15258ece5d6413179216b9d8a3b17a7464ff9754195ba07c925a700d"} Nov 29 04:35:50 crc kubenswrapper[4631]: I1129 04:35:50.715898 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:35:50 crc kubenswrapper[4631]: I1129 04:35:50.716398 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:35:50 crc kubenswrapper[4631]: I1129 04:35:50.716435 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:35:50 crc kubenswrapper[4631]: I1129 04:35:50.717123 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9d1f5550d839094fbb4da7f02f707158ef23902852d0f7dc1490b7a66c7dc987"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 04:35:50 crc kubenswrapper[4631]: I1129 04:35:50.717184 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://9d1f5550d839094fbb4da7f02f707158ef23902852d0f7dc1490b7a66c7dc987" gracePeriod=600 Nov 29 04:35:51 crc kubenswrapper[4631]: I1129 04:35:51.838658 
4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="9d1f5550d839094fbb4da7f02f707158ef23902852d0f7dc1490b7a66c7dc987" exitCode=0 Nov 29 04:35:51 crc kubenswrapper[4631]: I1129 04:35:51.838729 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"9d1f5550d839094fbb4da7f02f707158ef23902852d0f7dc1490b7a66c7dc987"} Nov 29 04:35:51 crc kubenswrapper[4631]: I1129 04:35:51.839277 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"} Nov 29 04:35:51 crc kubenswrapper[4631]: I1129 04:35:51.839303 4631 scope.go:117] "RemoveContainer" containerID="b907ff8791c5156baa82f06284d01d372fbfcb2495bb80ab099417356b8d8104" Nov 29 04:35:54 crc kubenswrapper[4631]: I1129 04:35:54.784694 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 29 04:35:55 crc kubenswrapper[4631]: I1129 04:35:55.203516 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 29 04:36:00 crc kubenswrapper[4631]: I1129 04:36:00.936874 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" event={"ID":"811129e9-11e0-4619-b4ca-1779ce7ae461","Type":"ContainerStarted","Data":"5c62e64d2557855642cb05d4a1208bd9cb6113a25bf5a296577f72464552577e"} Nov 29 04:36:00 crc kubenswrapper[4631]: I1129 04:36:00.970378 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" podStartSLOduration=2.067798203 podStartE2EDuration="14.970359079s" podCreationTimestamp="2025-11-29 04:35:46 +0000 UTC" firstStartedPulling="2025-11-29 04:35:47.490080787 +0000 UTC m=+1474.554584301" lastFinishedPulling="2025-11-29 04:36:00.392641673 +0000 UTC m=+1487.457145177" observedRunningTime="2025-11-29 04:36:00.95655265 +0000 UTC m=+1488.021056164" watchObservedRunningTime="2025-11-29 04:36:00.970359079 +0000 UTC m=+1488.034862633" Nov 29 04:36:15 crc kubenswrapper[4631]: I1129 04:36:15.131468 4631 generic.go:334] "Generic (PLEG): container finished" podID="811129e9-11e0-4619-b4ca-1779ce7ae461" containerID="5c62e64d2557855642cb05d4a1208bd9cb6113a25bf5a296577f72464552577e" exitCode=0 Nov 29 04:36:15 crc kubenswrapper[4631]: I1129 04:36:15.132095 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" event={"ID":"811129e9-11e0-4619-b4ca-1779ce7ae461","Type":"ContainerDied","Data":"5c62e64d2557855642cb05d4a1208bd9cb6113a25bf5a296577f72464552577e"} Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.559656 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.695729 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-inventory\") pod \"811129e9-11e0-4619-b4ca-1779ce7ae461\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.695880 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gss84\" (UniqueName: \"kubernetes.io/projected/811129e9-11e0-4619-b4ca-1779ce7ae461-kube-api-access-gss84\") pod \"811129e9-11e0-4619-b4ca-1779ce7ae461\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.695915 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-ssh-key\") pod \"811129e9-11e0-4619-b4ca-1779ce7ae461\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.696117 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-repo-setup-combined-ca-bundle\") pod \"811129e9-11e0-4619-b4ca-1779ce7ae461\" (UID: \"811129e9-11e0-4619-b4ca-1779ce7ae461\") " Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.703870 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "811129e9-11e0-4619-b4ca-1779ce7ae461" (UID: "811129e9-11e0-4619-b4ca-1779ce7ae461"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.704678 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/811129e9-11e0-4619-b4ca-1779ce7ae461-kube-api-access-gss84" (OuterVolumeSpecName: "kube-api-access-gss84") pod "811129e9-11e0-4619-b4ca-1779ce7ae461" (UID: "811129e9-11e0-4619-b4ca-1779ce7ae461"). InnerVolumeSpecName "kube-api-access-gss84". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.731687 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-inventory" (OuterVolumeSpecName: "inventory") pod "811129e9-11e0-4619-b4ca-1779ce7ae461" (UID: "811129e9-11e0-4619-b4ca-1779ce7ae461"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.733166 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "811129e9-11e0-4619-b4ca-1779ce7ae461" (UID: "811129e9-11e0-4619-b4ca-1779ce7ae461"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.800770 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.801390 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gss84\" (UniqueName: \"kubernetes.io/projected/811129e9-11e0-4619-b4ca-1779ce7ae461-kube-api-access-gss84\") on node \"crc\" DevicePath \"\"" Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.801407 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:36:16 crc kubenswrapper[4631]: I1129 04:36:16.801416 4631 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/811129e9-11e0-4619-b4ca-1779ce7ae461-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.156151 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" event={"ID":"811129e9-11e0-4619-b4ca-1779ce7ae461","Type":"ContainerDied","Data":"832d055c15258ece5d6413179216b9d8a3b17a7464ff9754195ba07c925a700d"} Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.156676 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="832d055c15258ece5d6413179216b9d8a3b17a7464ff9754195ba07c925a700d" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.156218 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.313281 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z"] Nov 29 04:36:17 crc kubenswrapper[4631]: E1129 04:36:17.316830 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="811129e9-11e0-4619-b4ca-1779ce7ae461" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.316878 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="811129e9-11e0-4619-b4ca-1779ce7ae461" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.317249 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="811129e9-11e0-4619-b4ca-1779ce7ae461" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.318118 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.321192 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.321965 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.322161 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.323439 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.341254 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z"] Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.516396 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-c777z\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.516506 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdwxl\" (UniqueName: \"kubernetes.io/projected/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-kube-api-access-zdwxl\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-c777z\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.516551 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-c777z\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.618398 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdwxl\" (UniqueName: \"kubernetes.io/projected/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-kube-api-access-zdwxl\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-c777z\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.618460 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-c777z\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.618524 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-c777z\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.626027 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-c777z\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.626151 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-c777z\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.634428 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdwxl\" (UniqueName: \"kubernetes.io/projected/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-kube-api-access-zdwxl\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-c777z\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:17 crc kubenswrapper[4631]: I1129 04:36:17.640982 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:18 crc kubenswrapper[4631]: I1129 04:36:18.172522 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z"] Nov 29 04:36:19 crc kubenswrapper[4631]: I1129 04:36:19.180198 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" event={"ID":"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c","Type":"ContainerStarted","Data":"c0114ab7bb8295d2b8fbed745fe05e45c16a0e334e0e5c21d3d7ed404886cc8d"} Nov 29 04:36:19 crc kubenswrapper[4631]: I1129 04:36:19.180249 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" event={"ID":"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c","Type":"ContainerStarted","Data":"455a497700686aea9bb04612cc45207ea0082f0834b130f7dee579a201f33583"} Nov 29 04:36:19 crc kubenswrapper[4631]: I1129 04:36:19.208531 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" podStartSLOduration=1.556255548 podStartE2EDuration="2.208510095s" podCreationTimestamp="2025-11-29 04:36:17 +0000 UTC" firstStartedPulling="2025-11-29 04:36:18.168669037 +0000 UTC m=+1505.233172551" lastFinishedPulling="2025-11-29 04:36:18.820923584 +0000 UTC m=+1505.885427098" observedRunningTime="2025-11-29 04:36:19.205512031 +0000 UTC m=+1506.270015545" watchObservedRunningTime="2025-11-29 04:36:19.208510095 +0000 UTC m=+1506.273013609" Nov 29 04:36:22 crc kubenswrapper[4631]: I1129 04:36:22.220088 4631 generic.go:334] "Generic (PLEG): container finished" podID="b9f1d0ab-6519-4eb8-aacc-539fb0e6433c" containerID="c0114ab7bb8295d2b8fbed745fe05e45c16a0e334e0e5c21d3d7ed404886cc8d" exitCode=0 Nov 29 04:36:22 crc kubenswrapper[4631]: I1129 04:36:22.220196 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" 
event={"ID":"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c","Type":"ContainerDied","Data":"c0114ab7bb8295d2b8fbed745fe05e45c16a0e334e0e5c21d3d7ed404886cc8d"} Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.647263 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.769407 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-ssh-key\") pod \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.769509 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-inventory\") pod \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.769571 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdwxl\" (UniqueName: \"kubernetes.io/projected/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-kube-api-access-zdwxl\") pod \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\" (UID: \"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c\") " Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.777354 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-kube-api-access-zdwxl" (OuterVolumeSpecName: "kube-api-access-zdwxl") pod "b9f1d0ab-6519-4eb8-aacc-539fb0e6433c" (UID: "b9f1d0ab-6519-4eb8-aacc-539fb0e6433c"). InnerVolumeSpecName "kube-api-access-zdwxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.802399 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-inventory" (OuterVolumeSpecName: "inventory") pod "b9f1d0ab-6519-4eb8-aacc-539fb0e6433c" (UID: "b9f1d0ab-6519-4eb8-aacc-539fb0e6433c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.807977 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b9f1d0ab-6519-4eb8-aacc-539fb0e6433c" (UID: "b9f1d0ab-6519-4eb8-aacc-539fb0e6433c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.871979 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdwxl\" (UniqueName: \"kubernetes.io/projected/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-kube-api-access-zdwxl\") on node \"crc\" DevicePath \"\"" Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.872018 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:36:23 crc kubenswrapper[4631]: I1129 04:36:23.872029 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9f1d0ab-6519-4eb8-aacc-539fb0e6433c-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.249914 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" event={"ID":"b9f1d0ab-6519-4eb8-aacc-539fb0e6433c","Type":"ContainerDied","Data":"455a497700686aea9bb04612cc45207ea0082f0834b130f7dee579a201f33583"} Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.250203 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="455a497700686aea9bb04612cc45207ea0082f0834b130f7dee579a201f33583" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.250280 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-c777z" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.359938 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj"] Nov 29 04:36:24 crc kubenswrapper[4631]: E1129 04:36:24.360394 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9f1d0ab-6519-4eb8-aacc-539fb0e6433c" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.360411 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9f1d0ab-6519-4eb8-aacc-539fb0e6433c" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.360612 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9f1d0ab-6519-4eb8-aacc-539fb0e6433c" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.361262 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.364212 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.364666 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.365197 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.365706 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.369249 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj"] Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.388363 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.388458 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7hg7\" (UniqueName: \"kubernetes.io/projected/c7c0f56e-3925-47a4-9516-9c9d662540db-kube-api-access-v7hg7\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.388533 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.388590 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.491163 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.491343 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-inventory\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.491471 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7hg7\" (UniqueName: \"kubernetes.io/projected/c7c0f56e-3925-47a4-9516-9c9d662540db-kube-api-access-v7hg7\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.491605 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.503941 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.504773 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.506838 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.519533 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7hg7\" (UniqueName: \"kubernetes.io/projected/c7c0f56e-3925-47a4-9516-9c9d662540db-kube-api-access-v7hg7\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:24 crc kubenswrapper[4631]: I1129 04:36:24.691250 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:36:25 crc kubenswrapper[4631]: I1129 04:36:25.317106 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj"] Nov 29 04:36:26 crc kubenswrapper[4631]: I1129 04:36:26.268596 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" event={"ID":"c7c0f56e-3925-47a4-9516-9c9d662540db","Type":"ContainerStarted","Data":"1d3ec5215f846d1b18d754f84b69fe64313fe582a8ce2c2df626c13404e7bf9a"} Nov 29 04:36:27 crc kubenswrapper[4631]: I1129 04:36:27.285721 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" event={"ID":"c7c0f56e-3925-47a4-9516-9c9d662540db","Type":"ContainerStarted","Data":"2d33e8f87321485aa3a3ab37f1258fd0b8e1b856be33b17e1d7864278bf6ffea"} Nov 29 04:36:27 crc kubenswrapper[4631]: I1129 04:36:27.330273 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" podStartSLOduration=2.488929048 podStartE2EDuration="3.33024981s" podCreationTimestamp="2025-11-29 04:36:24 +0000 UTC" firstStartedPulling="2025-11-29 04:36:25.337667169 +0000 UTC m=+1512.402170683" lastFinishedPulling="2025-11-29 04:36:26.178987901 +0000 UTC m=+1513.243491445" observedRunningTime="2025-11-29 04:36:27.310314292 +0000 UTC m=+1514.374817806" watchObservedRunningTime="2025-11-29 04:36:27.33024981 +0000 UTC m=+1514.394753324" Nov 29 04:37:00 crc kubenswrapper[4631]: I1129 04:37:00.417551 4631 scope.go:117] "RemoveContainer" containerID="cd7994f5257d63f63b3dec09c5413ad86d9a8349949a5d6affeb80dcec240d51" Nov 29 04:37:00 crc kubenswrapper[4631]: I1129 04:37:00.449352 4631 scope.go:117] "RemoveContainer" containerID="809fd41a7c49c20fb18551e80cf3895e47ae14d32e10f53e1382dc48742f2ebe" Nov 29 04:37:00 crc kubenswrapper[4631]: I1129 04:37:00.495784 4631 scope.go:117] "RemoveContainer" containerID="07f60739dd49e34c3e697d6f139a2ed988c4c05a9fb4115b577dbb0ba02e551c" Nov 29 04:37:00 crc kubenswrapper[4631]: I1129 04:37:00.534681 4631 scope.go:117] "RemoveContainer" containerID="ad95cea063e9d485d805de7dd1ad46aaf52c15e5c59f83e5f29be1e83c4ecbfc" Nov 29 04:37:00 crc kubenswrapper[4631]: I1129 04:37:00.577194 4631 scope.go:117] "RemoveContainer" containerID="cec8aba9383136d5e254e8796ae2d99a5d54042238ec8803eb725bec4e35a161" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.228150 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jp758"] Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.230993 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.259675 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jp758"] Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.407487 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-utilities\") pod \"redhat-marketplace-jp758\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.407654 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-catalog-content\") pod \"redhat-marketplace-jp758\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.407701 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwt49\" (UniqueName: \"kubernetes.io/projected/679fb048-3d2d-4024-89d2-a28700283b60-kube-api-access-lwt49\") pod \"redhat-marketplace-jp758\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.509121 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-catalog-content\") pod \"redhat-marketplace-jp758\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.509212 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwt49\" (UniqueName: \"kubernetes.io/projected/679fb048-3d2d-4024-89d2-a28700283b60-kube-api-access-lwt49\") pod \"redhat-marketplace-jp758\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.509271 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-utilities\") pod \"redhat-marketplace-jp758\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.509716 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-catalog-content\") pod \"redhat-marketplace-jp758\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.509725 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-utilities\") pod \"redhat-marketplace-jp758\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.532611 4631 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-lwt49\" (UniqueName: \"kubernetes.io/projected/679fb048-3d2d-4024-89d2-a28700283b60-kube-api-access-lwt49\") pod \"redhat-marketplace-jp758\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:10 crc kubenswrapper[4631]: I1129 04:37:10.560811 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:11 crc kubenswrapper[4631]: I1129 04:37:11.002564 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jp758"] Nov 29 04:37:11 crc kubenswrapper[4631]: I1129 04:37:11.798437 4631 generic.go:334] "Generic (PLEG): container finished" podID="679fb048-3d2d-4024-89d2-a28700283b60" containerID="d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3" exitCode=0 Nov 29 04:37:11 crc kubenswrapper[4631]: I1129 04:37:11.798484 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jp758" event={"ID":"679fb048-3d2d-4024-89d2-a28700283b60","Type":"ContainerDied","Data":"d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3"} Nov 29 04:37:11 crc kubenswrapper[4631]: I1129 04:37:11.798688 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jp758" event={"ID":"679fb048-3d2d-4024-89d2-a28700283b60","Type":"ContainerStarted","Data":"25faf7b1fb7145a1cb44944b0192e427944b934ee7ce687fb867e16f45cb6a79"} Nov 29 04:37:12 crc kubenswrapper[4631]: I1129 04:37:12.810853 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jp758" event={"ID":"679fb048-3d2d-4024-89d2-a28700283b60","Type":"ContainerStarted","Data":"96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94"} Nov 29 04:37:14 crc kubenswrapper[4631]: I1129 04:37:14.838965 4631 generic.go:334] "Generic (PLEG): container finished" podID="679fb048-3d2d-4024-89d2-a28700283b60" containerID="96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94" exitCode=0 Nov 29 04:37:14 crc kubenswrapper[4631]: I1129 04:37:14.839302 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jp758" event={"ID":"679fb048-3d2d-4024-89d2-a28700283b60","Type":"ContainerDied","Data":"96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94"} Nov 29 04:37:15 crc kubenswrapper[4631]: I1129 04:37:15.856489 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jp758" event={"ID":"679fb048-3d2d-4024-89d2-a28700283b60","Type":"ContainerStarted","Data":"66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179"} Nov 29 04:37:15 crc kubenswrapper[4631]: I1129 04:37:15.881471 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jp758" podStartSLOduration=2.275787002 podStartE2EDuration="5.881454742s" podCreationTimestamp="2025-11-29 04:37:10 +0000 UTC" firstStartedPulling="2025-11-29 04:37:11.802981413 +0000 UTC m=+1558.867484927" lastFinishedPulling="2025-11-29 04:37:15.408649153 +0000 UTC m=+1562.473152667" observedRunningTime="2025-11-29 04:37:15.875877625 +0000 UTC m=+1562.940381149" watchObservedRunningTime="2025-11-29 04:37:15.881454742 +0000 UTC m=+1562.945958256" Nov 29 04:37:20 crc kubenswrapper[4631]: I1129 04:37:20.561176 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:20 crc kubenswrapper[4631]: I1129 04:37:20.561467 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:20 crc kubenswrapper[4631]: I1129 04:37:20.658865 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:20 crc kubenswrapper[4631]: I1129 04:37:20.982436 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:21 crc kubenswrapper[4631]: I1129 04:37:21.079247 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jp758"] Nov 29 04:37:22 crc kubenswrapper[4631]: I1129 04:37:22.938589 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jp758" podUID="679fb048-3d2d-4024-89d2-a28700283b60" containerName="registry-server" containerID="cri-o://66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179" gracePeriod=2 Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.331591 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tn2vg"] Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.333845 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.337282 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-catalog-content\") pod \"certified-operators-tn2vg\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.337623 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-utilities\") pod \"certified-operators-tn2vg\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.337672 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jrcd\" (UniqueName: \"kubernetes.io/projected/5f26e2f5-b872-435d-8af4-573dff4aa342-kube-api-access-9jrcd\") pod \"certified-operators-tn2vg\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.358570 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tn2vg"] Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.441343 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-catalog-content\") pod \"certified-operators-tn2vg\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.441392 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-utilities\") pod \"certified-operators-tn2vg\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.441432 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jrcd\" (UniqueName: \"kubernetes.io/projected/5f26e2f5-b872-435d-8af4-573dff4aa342-kube-api-access-9jrcd\") pod \"certified-operators-tn2vg\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.443207 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-catalog-content\") pod \"certified-operators-tn2vg\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.444062 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-utilities\") pod \"certified-operators-tn2vg\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.464298 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jrcd\" (UniqueName: \"kubernetes.io/projected/5f26e2f5-b872-435d-8af4-573dff4aa342-kube-api-access-9jrcd\") pod \"certified-operators-tn2vg\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.513350 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.544431 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwt49\" (UniqueName: \"kubernetes.io/projected/679fb048-3d2d-4024-89d2-a28700283b60-kube-api-access-lwt49\") pod \"679fb048-3d2d-4024-89d2-a28700283b60\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.544530 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-utilities\") pod \"679fb048-3d2d-4024-89d2-a28700283b60\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.544626 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-catalog-content\") pod \"679fb048-3d2d-4024-89d2-a28700283b60\" (UID: \"679fb048-3d2d-4024-89d2-a28700283b60\") " Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.568192 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "679fb048-3d2d-4024-89d2-a28700283b60" (UID: "679fb048-3d2d-4024-89d2-a28700283b60"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.576247 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-utilities" (OuterVolumeSpecName: "utilities") pod "679fb048-3d2d-4024-89d2-a28700283b60" (UID: "679fb048-3d2d-4024-89d2-a28700283b60"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.576509 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/679fb048-3d2d-4024-89d2-a28700283b60-kube-api-access-lwt49" (OuterVolumeSpecName: "kube-api-access-lwt49") pod "679fb048-3d2d-4024-89d2-a28700283b60" (UID: "679fb048-3d2d-4024-89d2-a28700283b60"). InnerVolumeSpecName "kube-api-access-lwt49". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.646454 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.646489 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwt49\" (UniqueName: \"kubernetes.io/projected/679fb048-3d2d-4024-89d2-a28700283b60-kube-api-access-lwt49\") on node \"crc\" DevicePath \"\"" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.646504 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/679fb048-3d2d-4024-89d2-a28700283b60-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.659368 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.955909 4631 generic.go:334] "Generic (PLEG): container finished" podID="679fb048-3d2d-4024-89d2-a28700283b60" containerID="66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179" exitCode=0 Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.956003 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jp758" event={"ID":"679fb048-3d2d-4024-89d2-a28700283b60","Type":"ContainerDied","Data":"66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179"} Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.956022 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jp758" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.956197 4631 scope.go:117] "RemoveContainer" containerID="66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179" Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.956181 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jp758" event={"ID":"679fb048-3d2d-4024-89d2-a28700283b60","Type":"ContainerDied","Data":"25faf7b1fb7145a1cb44944b0192e427944b934ee7ce687fb867e16f45cb6a79"} Nov 29 04:37:23 crc kubenswrapper[4631]: I1129 04:37:23.978740 4631 scope.go:117] "RemoveContainer" containerID="96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94" Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.005357 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jp758"] Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.011181 4631 scope.go:117] "RemoveContainer" containerID="d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3" Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.020890 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jp758"] Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.033358 4631 scope.go:117] "RemoveContainer" containerID="66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179" Nov 29 04:37:24 crc kubenswrapper[4631]: E1129 04:37:24.038495 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179\": container with ID starting with 66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179 not found: ID does not exist" containerID="66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179" Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.038542 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179"} err="failed to get container status \"66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179\": rpc error: code = NotFound desc = could not find container \"66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179\": container with ID starting with 66d62b8dcda93c8d2aeb78e57c638839f439e336da210d29e2bdb13e61c9b179 not found: ID does not exist" Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.038571 4631 scope.go:117] "RemoveContainer" containerID="96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94" Nov 29 04:37:24 crc kubenswrapper[4631]: E1129 04:37:24.039384 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94\": container with ID starting with 96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94 not found: ID does not exist" containerID="96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94" Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.039409 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94"} err="failed to get container status \"96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94\": rpc error: code = NotFound desc = could not find 
container \"96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94\": container with ID starting with 96775318c9bf69e5d4e4ea2f315b7be0a3a635c14aa6fc9972c6ccf2ded0bf94 not found: ID does not exist" Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.039424 4631 scope.go:117] "RemoveContainer" containerID="d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3" Nov 29 04:37:24 crc kubenswrapper[4631]: E1129 04:37:24.039602 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3\": container with ID starting with d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3 not found: ID does not exist" containerID="d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3" Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.039626 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3"} err="failed to get container status \"d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3\": rpc error: code = NotFound desc = could not find container \"d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3\": container with ID starting with d81dfa6ded204c5e0afb299f3ff555bfefa249d7c204ddc34309f6fabeb963f3 not found: ID does not exist" Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.090195 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tn2vg"] Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.964277 4631 generic.go:334] "Generic (PLEG): container finished" podID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerID="08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a" exitCode=0 Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.964414 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn2vg" event={"ID":"5f26e2f5-b872-435d-8af4-573dff4aa342","Type":"ContainerDied","Data":"08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a"} Nov 29 04:37:24 crc kubenswrapper[4631]: I1129 04:37:24.964571 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn2vg" event={"ID":"5f26e2f5-b872-435d-8af4-573dff4aa342","Type":"ContainerStarted","Data":"64c2696d87ee093cf4f34067376669a6bfba7f3da7b678f4f87315ebf631c7c1"} Nov 29 04:37:25 crc kubenswrapper[4631]: I1129 04:37:25.236044 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="679fb048-3d2d-4024-89d2-a28700283b60" path="/var/lib/kubelet/pods/679fb048-3d2d-4024-89d2-a28700283b60/volumes" Nov 29 04:37:27 crc kubenswrapper[4631]: I1129 04:37:27.024400 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn2vg" event={"ID":"5f26e2f5-b872-435d-8af4-573dff4aa342","Type":"ContainerStarted","Data":"5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608"} Nov 29 04:37:28 crc kubenswrapper[4631]: I1129 04:37:28.041477 4631 generic.go:334] "Generic (PLEG): container finished" podID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerID="5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608" exitCode=0 Nov 29 04:37:28 crc kubenswrapper[4631]: I1129 04:37:28.041559 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn2vg" 
event={"ID":"5f26e2f5-b872-435d-8af4-573dff4aa342","Type":"ContainerDied","Data":"5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608"} Nov 29 04:37:29 crc kubenswrapper[4631]: I1129 04:37:29.074903 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn2vg" event={"ID":"5f26e2f5-b872-435d-8af4-573dff4aa342","Type":"ContainerStarted","Data":"cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388"} Nov 29 04:37:29 crc kubenswrapper[4631]: I1129 04:37:29.100984 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tn2vg" podStartSLOduration=2.5069514550000003 podStartE2EDuration="6.100968778s" podCreationTimestamp="2025-11-29 04:37:23 +0000 UTC" firstStartedPulling="2025-11-29 04:37:24.965926454 +0000 UTC m=+1572.030429968" lastFinishedPulling="2025-11-29 04:37:28.559943737 +0000 UTC m=+1575.624447291" observedRunningTime="2025-11-29 04:37:29.095618527 +0000 UTC m=+1576.160122051" watchObservedRunningTime="2025-11-29 04:37:29.100968778 +0000 UTC m=+1576.165472292" Nov 29 04:37:33 crc kubenswrapper[4631]: I1129 04:37:33.659713 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:33 crc kubenswrapper[4631]: I1129 04:37:33.660324 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:33 crc kubenswrapper[4631]: I1129 04:37:33.731637 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:34 crc kubenswrapper[4631]: I1129 04:37:34.190494 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:34 crc kubenswrapper[4631]: I1129 04:37:34.255699 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tn2vg"] Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.143134 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tn2vg" podUID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerName="registry-server" containerID="cri-o://cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388" gracePeriod=2 Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.672842 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.831002 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jrcd\" (UniqueName: \"kubernetes.io/projected/5f26e2f5-b872-435d-8af4-573dff4aa342-kube-api-access-9jrcd\") pod \"5f26e2f5-b872-435d-8af4-573dff4aa342\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.831709 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-utilities\") pod \"5f26e2f5-b872-435d-8af4-573dff4aa342\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.831859 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-catalog-content\") pod \"5f26e2f5-b872-435d-8af4-573dff4aa342\" (UID: \"5f26e2f5-b872-435d-8af4-573dff4aa342\") " Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.833611 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-utilities" (OuterVolumeSpecName: "utilities") pod "5f26e2f5-b872-435d-8af4-573dff4aa342" (UID: "5f26e2f5-b872-435d-8af4-573dff4aa342"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.846461 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f26e2f5-b872-435d-8af4-573dff4aa342-kube-api-access-9jrcd" (OuterVolumeSpecName: "kube-api-access-9jrcd") pod "5f26e2f5-b872-435d-8af4-573dff4aa342" (UID: "5f26e2f5-b872-435d-8af4-573dff4aa342"). InnerVolumeSpecName "kube-api-access-9jrcd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.892887 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f26e2f5-b872-435d-8af4-573dff4aa342" (UID: "5f26e2f5-b872-435d-8af4-573dff4aa342"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.934978 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.935032 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f26e2f5-b872-435d-8af4-573dff4aa342-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:37:36 crc kubenswrapper[4631]: I1129 04:37:36.935057 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jrcd\" (UniqueName: \"kubernetes.io/projected/5f26e2f5-b872-435d-8af4-573dff4aa342-kube-api-access-9jrcd\") on node \"crc\" DevicePath \"\"" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.159200 4631 generic.go:334] "Generic (PLEG): container finished" podID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerID="cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388" exitCode=0 Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.159360 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tn2vg" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.160663 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn2vg" event={"ID":"5f26e2f5-b872-435d-8af4-573dff4aa342","Type":"ContainerDied","Data":"cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388"} Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.160798 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn2vg" event={"ID":"5f26e2f5-b872-435d-8af4-573dff4aa342","Type":"ContainerDied","Data":"64c2696d87ee093cf4f34067376669a6bfba7f3da7b678f4f87315ebf631c7c1"} Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.160891 4631 scope.go:117] "RemoveContainer" containerID="cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.190776 4631 scope.go:117] "RemoveContainer" containerID="5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.218807 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tn2vg"] Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.234268 4631 scope.go:117] "RemoveContainer" containerID="08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.271955 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tn2vg"] Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.309683 4631 scope.go:117] "RemoveContainer" containerID="cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388" Nov 29 04:37:37 crc kubenswrapper[4631]: E1129 04:37:37.310026 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388\": container with ID starting with cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388 not found: ID does not exist" containerID="cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.310079 
4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388"} err="failed to get container status \"cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388\": rpc error: code = NotFound desc = could not find container \"cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388\": container with ID starting with cb4f8873d5f82b7b41db8416d8b21d3ffdb0a46e8792480f969b42c1ecd53388 not found: ID does not exist" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.310100 4631 scope.go:117] "RemoveContainer" containerID="5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608" Nov 29 04:37:37 crc kubenswrapper[4631]: E1129 04:37:37.310410 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608\": container with ID starting with 5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608 not found: ID does not exist" containerID="5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.310465 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608"} err="failed to get container status \"5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608\": rpc error: code = NotFound desc = could not find container \"5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608\": container with ID starting with 5192673038a7446f9c9daf11341d7075429e7a66d03d7228f4e0e98154835608 not found: ID does not exist" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.310649 4631 scope.go:117] "RemoveContainer" containerID="08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a" Nov 29 04:37:37 crc kubenswrapper[4631]: E1129 04:37:37.310897 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a\": container with ID starting with 08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a not found: ID does not exist" containerID="08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a" Nov 29 04:37:37 crc kubenswrapper[4631]: I1129 04:37:37.310937 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a"} err="failed to get container status \"08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a\": rpc error: code = NotFound desc = could not find container \"08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a\": container with ID starting with 08b0f150fa1588645ba21797bff1b1233310817e6318e2d09cb8e8977da17a8a not found: ID does not exist" Nov 29 04:37:39 crc kubenswrapper[4631]: I1129 04:37:39.227574 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f26e2f5-b872-435d-8af4-573dff4aa342" path="/var/lib/kubelet/pods/5f26e2f5-b872-435d-8af4-573dff4aa342/volumes" Nov 29 04:38:20 crc kubenswrapper[4631]: I1129 04:38:20.715865 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:38:20 crc kubenswrapper[4631]: I1129 04:38:20.716644 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:38:50 crc kubenswrapper[4631]: I1129 04:38:50.717155 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:38:50 crc kubenswrapper[4631]: I1129 04:38:50.717759 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:39:20 crc kubenswrapper[4631]: I1129 04:39:20.716127 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:39:20 crc kubenswrapper[4631]: I1129 04:39:20.716733 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:39:20 crc kubenswrapper[4631]: I1129 04:39:20.716781 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:39:20 crc kubenswrapper[4631]: I1129 04:39:20.717631 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 04:39:20 crc kubenswrapper[4631]: I1129 04:39:20.717687 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" gracePeriod=600 Nov 29 04:39:20 crc kubenswrapper[4631]: E1129 04:39:20.837965 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:39:21 crc kubenswrapper[4631]: I1129 04:39:21.406034 4631 
generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" exitCode=0 Nov 29 04:39:21 crc kubenswrapper[4631]: I1129 04:39:21.406197 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"} Nov 29 04:39:21 crc kubenswrapper[4631]: I1129 04:39:21.406439 4631 scope.go:117] "RemoveContainer" containerID="9d1f5550d839094fbb4da7f02f707158ef23902852d0f7dc1490b7a66c7dc987" Nov 29 04:39:21 crc kubenswrapper[4631]: I1129 04:39:21.407156 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:39:21 crc kubenswrapper[4631]: E1129 04:39:21.407495 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:39:32 crc kubenswrapper[4631]: I1129 04:39:32.217087 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:39:32 crc kubenswrapper[4631]: E1129 04:39:32.218129 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:39:36 crc kubenswrapper[4631]: I1129 04:39:36.075056 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-76d2-account-create-update-8cjpz"] Nov 29 04:39:36 crc kubenswrapper[4631]: I1129 04:39:36.085403 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-wqw7t"] Nov 29 04:39:36 crc kubenswrapper[4631]: I1129 04:39:36.092200 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-76d2-account-create-update-8cjpz"] Nov 29 04:39:36 crc kubenswrapper[4631]: I1129 04:39:36.098678 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-wqw7t"] Nov 29 04:39:37 crc kubenswrapper[4631]: I1129 04:39:37.231677 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a5ebe9e-271d-4e18-88ee-cd5933fa1a38" path="/var/lib/kubelet/pods/2a5ebe9e-271d-4e18-88ee-cd5933fa1a38/volumes" Nov 29 04:39:37 crc kubenswrapper[4631]: I1129 04:39:37.233712 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c93fa44-2e1c-406b-aa4f-967436e33d1f" path="/var/lib/kubelet/pods/8c93fa44-2e1c-406b-aa4f-967436e33d1f/volumes" Nov 29 04:39:41 crc kubenswrapper[4631]: I1129 04:39:41.033158 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-d2q8v"] Nov 29 04:39:41 crc kubenswrapper[4631]: I1129 04:39:41.041642 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-nxmq7"] Nov 29 04:39:41 crc 
kubenswrapper[4631]: I1129 04:39:41.049891 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5ce7-account-create-update-bnj85"] Nov 29 04:39:41 crc kubenswrapper[4631]: I1129 04:39:41.064818 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-nxmq7"] Nov 29 04:39:41 crc kubenswrapper[4631]: I1129 04:39:41.075556 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-d2q8v"] Nov 29 04:39:41 crc kubenswrapper[4631]: I1129 04:39:41.085111 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5ce7-account-create-update-bnj85"] Nov 29 04:39:41 crc kubenswrapper[4631]: I1129 04:39:41.227324 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4" path="/var/lib/kubelet/pods/3ca8d74c-2b26-4c5a-9d68-4773da8f6ff4/volumes" Nov 29 04:39:41 crc kubenswrapper[4631]: I1129 04:39:41.228719 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88af8b16-da55-45bc-b04a-38984d9f6f2d" path="/var/lib/kubelet/pods/88af8b16-da55-45bc-b04a-38984d9f6f2d/volumes" Nov 29 04:39:41 crc kubenswrapper[4631]: I1129 04:39:41.230024 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcc88877-f1aa-4a40-b362-4b95ee7f4a72" path="/var/lib/kubelet/pods/bcc88877-f1aa-4a40-b362-4b95ee7f4a72/volumes" Nov 29 04:39:42 crc kubenswrapper[4631]: I1129 04:39:42.034858 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-c306-account-create-update-wh7pg"] Nov 29 04:39:42 crc kubenswrapper[4631]: I1129 04:39:42.045720 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-c306-account-create-update-wh7pg"] Nov 29 04:39:43 crc kubenswrapper[4631]: I1129 04:39:43.231867 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0d28ea4-ab7d-4765-88a9-f889c935c418" path="/var/lib/kubelet/pods/e0d28ea4-ab7d-4765-88a9-f889c935c418/volumes" Nov 29 04:39:45 crc kubenswrapper[4631]: I1129 04:39:45.216450 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:39:45 crc kubenswrapper[4631]: E1129 04:39:45.216756 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.045028 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1934-account-create-update-twc59"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.052084 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-f5pvv"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.068401 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7566-account-create-update-wrcb6"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.088133 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-28fd-account-create-update-wj22h"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.096380 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-m57j4"] Nov 29 04:39:47 crc 
kubenswrapper[4631]: I1129 04:39:47.102667 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-f5pvv"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.109511 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7566-account-create-update-wrcb6"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.116400 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-1934-account-create-update-twc59"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.123462 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-m57j4"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.130150 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-28fd-account-create-update-wj22h"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.136493 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-tlnb9"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.144425 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-tlnb9"] Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.225958 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0940a4d9-6460-4e3e-91c2-b84ac32e33c4" path="/var/lib/kubelet/pods/0940a4d9-6460-4e3e-91c2-b84ac32e33c4/volumes" Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.227058 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e71ff62-6937-453f-9add-da82958c3990" path="/var/lib/kubelet/pods/2e71ff62-6937-453f-9add-da82958c3990/volumes" Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.228369 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="342b6f95-d7bc-491b-b9bf-a218b7825807" path="/var/lib/kubelet/pods/342b6f95-d7bc-491b-b9bf-a218b7825807/volumes" Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.229822 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="627bd4eb-a3ae-4a48-bccf-e65734ff396e" path="/var/lib/kubelet/pods/627bd4eb-a3ae-4a48-bccf-e65734ff396e/volumes" Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.230610 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8734e05c-9806-444b-b0c7-31d795cc4e8a" path="/var/lib/kubelet/pods/8734e05c-9806-444b-b0c7-31d795cc4e8a/volumes" Nov 29 04:39:47 crc kubenswrapper[4631]: I1129 04:39:47.231472 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c103a4b-4b90-43c6-838f-8a71fb909eaa" path="/var/lib/kubelet/pods/9c103a4b-4b90-43c6-838f-8a71fb909eaa/volumes" Nov 29 04:39:54 crc kubenswrapper[4631]: I1129 04:39:54.748764 4631 generic.go:334] "Generic (PLEG): container finished" podID="c7c0f56e-3925-47a4-9516-9c9d662540db" containerID="2d33e8f87321485aa3a3ab37f1258fd0b8e1b856be33b17e1d7864278bf6ffea" exitCode=0 Nov 29 04:39:54 crc kubenswrapper[4631]: I1129 04:39:54.748843 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" event={"ID":"c7c0f56e-3925-47a4-9516-9c9d662540db","Type":"ContainerDied","Data":"2d33e8f87321485aa3a3ab37f1258fd0b8e1b856be33b17e1d7864278bf6ffea"} Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.216222 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:39:56 crc kubenswrapper[4631]: E1129 04:39:56.216901 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.253690 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.444659 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-inventory\") pod \"c7c0f56e-3925-47a4-9516-9c9d662540db\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.444872 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7hg7\" (UniqueName: \"kubernetes.io/projected/c7c0f56e-3925-47a4-9516-9c9d662540db-kube-api-access-v7hg7\") pod \"c7c0f56e-3925-47a4-9516-9c9d662540db\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.445023 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-ssh-key\") pod \"c7c0f56e-3925-47a4-9516-9c9d662540db\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.445087 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-bootstrap-combined-ca-bundle\") pod \"c7c0f56e-3925-47a4-9516-9c9d662540db\" (UID: \"c7c0f56e-3925-47a4-9516-9c9d662540db\") " Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.449620 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "c7c0f56e-3925-47a4-9516-9c9d662540db" (UID: "c7c0f56e-3925-47a4-9516-9c9d662540db"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.455381 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7c0f56e-3925-47a4-9516-9c9d662540db-kube-api-access-v7hg7" (OuterVolumeSpecName: "kube-api-access-v7hg7") pod "c7c0f56e-3925-47a4-9516-9c9d662540db" (UID: "c7c0f56e-3925-47a4-9516-9c9d662540db"). InnerVolumeSpecName "kube-api-access-v7hg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.474478 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c7c0f56e-3925-47a4-9516-9c9d662540db" (UID: "c7c0f56e-3925-47a4-9516-9c9d662540db"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.484353 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-inventory" (OuterVolumeSpecName: "inventory") pod "c7c0f56e-3925-47a4-9516-9c9d662540db" (UID: "c7c0f56e-3925-47a4-9516-9c9d662540db"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.547294 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.547351 4631 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.547365 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c7c0f56e-3925-47a4-9516-9c9d662540db-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.547376 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7hg7\" (UniqueName: \"kubernetes.io/projected/c7c0f56e-3925-47a4-9516-9c9d662540db-kube-api-access-v7hg7\") on node \"crc\" DevicePath \"\"" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.773845 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj" event={"ID":"c7c0f56e-3925-47a4-9516-9c9d662540db","Type":"ContainerDied","Data":"1d3ec5215f846d1b18d754f84b69fe64313fe582a8ce2c2df626c13404e7bf9a"} Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.773887 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d3ec5215f846d1b18d754f84b69fe64313fe582a8ce2c2df626c13404e7bf9a" Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.773937 4631 util.go:48] "No ready sandbox for pod can be found. 
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866232 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"]
Nov 29 04:39:56 crc kubenswrapper[4631]: E1129 04:39:56.866568 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679fb048-3d2d-4024-89d2-a28700283b60" containerName="extract-content"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866580 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="679fb048-3d2d-4024-89d2-a28700283b60" containerName="extract-content"
Nov 29 04:39:56 crc kubenswrapper[4631]: E1129 04:39:56.866599 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679fb048-3d2d-4024-89d2-a28700283b60" containerName="registry-server"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866605 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="679fb048-3d2d-4024-89d2-a28700283b60" containerName="registry-server"
Nov 29 04:39:56 crc kubenswrapper[4631]: E1129 04:39:56.866620 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7c0f56e-3925-47a4-9516-9c9d662540db" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866627 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7c0f56e-3925-47a4-9516-9c9d662540db" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:39:56 crc kubenswrapper[4631]: E1129 04:39:56.866634 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerName="extract-utilities"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866639 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerName="extract-utilities"
Nov 29 04:39:56 crc kubenswrapper[4631]: E1129 04:39:56.866654 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerName="extract-content"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866659 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerName="extract-content"
Nov 29 04:39:56 crc kubenswrapper[4631]: E1129 04:39:56.866671 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerName="registry-server"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866677 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerName="registry-server"
Nov 29 04:39:56 crc kubenswrapper[4631]: E1129 04:39:56.866687 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="679fb048-3d2d-4024-89d2-a28700283b60" containerName="extract-utilities"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866694 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="679fb048-3d2d-4024-89d2-a28700283b60" containerName="extract-utilities"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866857 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="679fb048-3d2d-4024-89d2-a28700283b60" containerName="registry-server"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866877 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7c0f56e-3925-47a4-9516-9c9d662540db" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.866891 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f26e2f5-b872-435d-8af4-573dff4aa342" containerName="registry-server"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.867451 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.869622 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.870061 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.871285 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.871659 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 29 04:39:56 crc kubenswrapper[4631]: I1129 04:39:56.887916 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"]
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.057502 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4c9hg\" (UniqueName: \"kubernetes.io/projected/919eea43-11e7-42f0-8d23-e46e7cbc5359-kube-api-access-4c9hg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.057776 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.057887 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.159055 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4c9hg\" (UniqueName: \"kubernetes.io/projected/919eea43-11e7-42f0-8d23-e46e7cbc5359-kube-api-access-4c9hg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.159103 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.159135 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.166271 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.168432 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.175750 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4c9hg\" (UniqueName: \"kubernetes.io/projected/919eea43-11e7-42f0-8d23-e46e7cbc5359-kube-api-access-4c9hg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.182190 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.754156 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj"]
Nov 29 04:39:57 crc kubenswrapper[4631]: W1129 04:39:57.755813 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod919eea43_11e7_42f0_8d23_e46e7cbc5359.slice/crio-643a659d2b76846253cb534ac3e2891cd39540bda48439193b59be93bf7243fa WatchSource:0}: Error finding container 643a659d2b76846253cb534ac3e2891cd39540bda48439193b59be93bf7243fa: Status 404 returned error can't find the container with id 643a659d2b76846253cb534ac3e2891cd39540bda48439193b59be93bf7243fa
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.759455 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 29 04:39:57 crc kubenswrapper[4631]: I1129 04:39:57.799108 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj" event={"ID":"919eea43-11e7-42f0-8d23-e46e7cbc5359","Type":"ContainerStarted","Data":"643a659d2b76846253cb534ac3e2891cd39540bda48439193b59be93bf7243fa"}
Nov 29 04:39:58 crc kubenswrapper[4631]: I1129 04:39:58.811262 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj" event={"ID":"919eea43-11e7-42f0-8d23-e46e7cbc5359","Type":"ContainerStarted","Data":"acf441250c57037d33c58c949f7b3b85cd238c485991ff041b818f28bb0920ab"}
Nov 29 04:39:58 crc kubenswrapper[4631]: I1129 04:39:58.849710 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj" podStartSLOduration=2.143833864 podStartE2EDuration="2.849677966s" podCreationTimestamp="2025-11-29 04:39:56 +0000 UTC" firstStartedPulling="2025-11-29 04:39:57.759172877 +0000 UTC m=+1724.823676401" lastFinishedPulling="2025-11-29 04:39:58.465016979 +0000 UTC m=+1725.529520503" observedRunningTime="2025-11-29 04:39:58.840895311 +0000 UTC m=+1725.905398855" watchObservedRunningTime="2025-11-29 04:39:58.849677966 +0000 UTC m=+1725.914181510"
Nov 29 04:40:00 crc kubenswrapper[4631]: I1129 04:40:00.797394 4631 scope.go:117] "RemoveContainer" containerID="2b79860d20564eb7d9b1ba2ae7231e36863263c53cc01e7d849c390bf32446e5"
Nov 29 04:40:00 crc kubenswrapper[4631]: I1129 04:40:00.836616 4631 scope.go:117] "RemoveContainer" containerID="16979f2296e2f251032bf77412746d907fab31551ba350fd48330eaed4ea8e56"
Nov 29 04:40:00 crc kubenswrapper[4631]: I1129 04:40:00.887771 4631 scope.go:117] "RemoveContainer" containerID="1ef80e39cf89ec44b4431dce81708dc24d232b7c235a92534c1632f9ec14aecb"
Nov 29 04:40:00 crc kubenswrapper[4631]: I1129 04:40:00.934698 4631 scope.go:117] "RemoveContainer" containerID="984a38512a137d418dc5b16af7d191cc94ae7a9877c631b7d21c274c4a0ed842"
Nov 29 04:40:00 crc kubenswrapper[4631]: I1129 04:40:00.980113 4631 scope.go:117] "RemoveContainer" containerID="d7f7f7e88a0df25271213e207f849c6868f8f84acfe6f2ee4bf2b9a621991426"
Nov 29 04:40:01 crc kubenswrapper[4631]: I1129 04:40:01.044502 4631 scope.go:117] "RemoveContainer" containerID="e0b00c9c253ac2fe51f730864292f14c196fbd78e3468780a9281d86fd3a80af"
Nov 29 04:40:01 crc kubenswrapper[4631]: I1129 04:40:01.074580 4631 scope.go:117] "RemoveContainer" containerID="9014280ee4e0991c7227bba0ca2606123046d66eaf690ba1c7d7e324f9d6e803"
containerID="9014280ee4e0991c7227bba0ca2606123046d66eaf690ba1c7d7e324f9d6e803" Nov 29 04:40:01 crc kubenswrapper[4631]: I1129 04:40:01.096220 4631 scope.go:117] "RemoveContainer" containerID="3fe56cb79c27c74bba1fb316dd45785f46b530ec775b7ba9925847fdd6291e11" Nov 29 04:40:01 crc kubenswrapper[4631]: I1129 04:40:01.118253 4631 scope.go:117] "RemoveContainer" containerID="5cab43d7561ccbc5d714b54ffc7519562ef28babace98df339823fa7c8de3ae8" Nov 29 04:40:01 crc kubenswrapper[4631]: I1129 04:40:01.136813 4631 scope.go:117] "RemoveContainer" containerID="2ad1bbb050894d1934ec8cb54b9d09052e5eb3c932a8cf7cd20e0d87eb85d6ec" Nov 29 04:40:01 crc kubenswrapper[4631]: I1129 04:40:01.162215 4631 scope.go:117] "RemoveContainer" containerID="ba7dca68b6cabbbd19beb62ab3c17bf131be9ab64450ef6fdc7e27b2f680fab7" Nov 29 04:40:01 crc kubenswrapper[4631]: I1129 04:40:01.180748 4631 scope.go:117] "RemoveContainer" containerID="83e083ddb0c739a58651a4a78580d188e9e361ea09cdecdee97cfdb17bd959f4" Nov 29 04:40:08 crc kubenswrapper[4631]: I1129 04:40:08.216476 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:40:08 crc kubenswrapper[4631]: E1129 04:40:08.217599 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:40:12 crc kubenswrapper[4631]: I1129 04:40:12.048545 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-w9m8l"] Nov 29 04:40:12 crc kubenswrapper[4631]: I1129 04:40:12.059281 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-w9m8l"] Nov 29 04:40:13 crc kubenswrapper[4631]: I1129 04:40:13.240128 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="425113ab-54e0-4372-bdd1-587e2dc743d2" path="/var/lib/kubelet/pods/425113ab-54e0-4372-bdd1-587e2dc743d2/volumes" Nov 29 04:40:21 crc kubenswrapper[4631]: I1129 04:40:21.216535 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:40:21 crc kubenswrapper[4631]: E1129 04:40:21.217251 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:40:33 crc kubenswrapper[4631]: I1129 04:40:33.221728 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:40:33 crc kubenswrapper[4631]: E1129 04:40:33.222298 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" 
podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:40:48 crc kubenswrapper[4631]: I1129 04:40:48.217627 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:40:48 crc kubenswrapper[4631]: E1129 04:40:48.220486 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:40:53 crc kubenswrapper[4631]: I1129 04:40:53.056153 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-7882p"] Nov 29 04:40:53 crc kubenswrapper[4631]: I1129 04:40:53.063413 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-7882p"] Nov 29 04:40:53 crc kubenswrapper[4631]: I1129 04:40:53.071118 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-57lmq"] Nov 29 04:40:53 crc kubenswrapper[4631]: I1129 04:40:53.087698 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-57lmq"] Nov 29 04:40:53 crc kubenswrapper[4631]: I1129 04:40:53.236241 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30557b8c-2204-4118-a123-8fb42dc36b19" path="/var/lib/kubelet/pods/30557b8c-2204-4118-a123-8fb42dc36b19/volumes" Nov 29 04:40:53 crc kubenswrapper[4631]: I1129 04:40:53.238645 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6cdce96-7bd4-45c6-9597-6196ceee67ef" path="/var/lib/kubelet/pods/c6cdce96-7bd4-45c6-9597-6196ceee67ef/volumes" Nov 29 04:41:01 crc kubenswrapper[4631]: I1129 04:41:01.408583 4631 scope.go:117] "RemoveContainer" containerID="9d113d2b9e9971ad81ce0e829fe55922228723302c79bc06b73da6244ca7728a" Nov 29 04:41:01 crc kubenswrapper[4631]: I1129 04:41:01.459111 4631 scope.go:117] "RemoveContainer" containerID="ae5b2bdd4d4687b9e130c1e5e7c365ad1d8e48fd67ea533d600227b1a2db6a1b" Nov 29 04:41:01 crc kubenswrapper[4631]: I1129 04:41:01.534645 4631 scope.go:117] "RemoveContainer" containerID="327d9bd07e47cce7a30e94aafe53f69e85f72d046d226fe2c24490ed612194c9" Nov 29 04:41:01 crc kubenswrapper[4631]: I1129 04:41:01.562598 4631 scope.go:117] "RemoveContainer" containerID="ba3b9807761eda10e3dfe76eed243b5c29abedbdc9d4902d9c1b0da9b6415c78" Nov 29 04:41:01 crc kubenswrapper[4631]: I1129 04:41:01.599390 4631 scope.go:117] "RemoveContainer" containerID="98f356801da8af3ca590eb19610b34ddf7ed1e48357bd2a5e63877f7c2c774c5" Nov 29 04:41:01 crc kubenswrapper[4631]: I1129 04:41:01.644178 4631 scope.go:117] "RemoveContainer" containerID="c2ecc7629be0682c7c613617a8f99b8f0064d68c787d06d8e70a2c6981809c38" Nov 29 04:41:03 crc kubenswrapper[4631]: I1129 04:41:03.228488 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:41:03 crc kubenswrapper[4631]: E1129 04:41:03.229166 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" 
podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:41:07 crc kubenswrapper[4631]: I1129 04:41:07.078012 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-jmcnk"] Nov 29 04:41:07 crc kubenswrapper[4631]: I1129 04:41:07.096242 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-jmcnk"] Nov 29 04:41:07 crc kubenswrapper[4631]: I1129 04:41:07.231638 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a612579-d131-4dbd-85bc-ba455a26db3b" path="/var/lib/kubelet/pods/8a612579-d131-4dbd-85bc-ba455a26db3b/volumes" Nov 29 04:41:08 crc kubenswrapper[4631]: I1129 04:41:08.045818 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-6bzb6"] Nov 29 04:41:08 crc kubenswrapper[4631]: I1129 04:41:08.055320 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-6bzb6"] Nov 29 04:41:09 crc kubenswrapper[4631]: I1129 04:41:09.237629 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d744ff5-22a8-445d-a1f9-a7fd1030d5ed" path="/var/lib/kubelet/pods/4d744ff5-22a8-445d-a1f9-a7fd1030d5ed/volumes" Nov 29 04:41:12 crc kubenswrapper[4631]: I1129 04:41:12.040623 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-4zg2x"] Nov 29 04:41:12 crc kubenswrapper[4631]: I1129 04:41:12.051049 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-4zg2x"] Nov 29 04:41:13 crc kubenswrapper[4631]: I1129 04:41:13.244618 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77e723d0-49bf-4008-bbce-7c1fe2ad3a5d" path="/var/lib/kubelet/pods/77e723d0-49bf-4008-bbce-7c1fe2ad3a5d/volumes" Nov 29 04:41:18 crc kubenswrapper[4631]: I1129 04:41:18.216582 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:41:18 crc kubenswrapper[4631]: E1129 04:41:18.217462 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:41:30 crc kubenswrapper[4631]: I1129 04:41:30.216880 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:41:30 crc kubenswrapper[4631]: E1129 04:41:30.217852 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:41:32 crc kubenswrapper[4631]: I1129 04:41:32.058294 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-cztsz"] Nov 29 04:41:32 crc kubenswrapper[4631]: I1129 04:41:32.077563 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-cztsz"] Nov 29 04:41:33 crc kubenswrapper[4631]: I1129 04:41:33.227139 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="d646890b-5054-4ad5-9dc0-940a5e397fd0" path="/var/lib/kubelet/pods/d646890b-5054-4ad5-9dc0-940a5e397fd0/volumes" Nov 29 04:41:41 crc kubenswrapper[4631]: I1129 04:41:41.217075 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:41:41 crc kubenswrapper[4631]: E1129 04:41:41.218109 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:41:44 crc kubenswrapper[4631]: I1129 04:41:44.031074 4631 generic.go:334] "Generic (PLEG): container finished" podID="919eea43-11e7-42f0-8d23-e46e7cbc5359" containerID="acf441250c57037d33c58c949f7b3b85cd238c485991ff041b818f28bb0920ab" exitCode=0 Nov 29 04:41:44 crc kubenswrapper[4631]: I1129 04:41:44.031268 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj" event={"ID":"919eea43-11e7-42f0-8d23-e46e7cbc5359","Type":"ContainerDied","Data":"acf441250c57037d33c58c949f7b3b85cd238c485991ff041b818f28bb0920ab"} Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.496308 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj" Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.692570 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4c9hg\" (UniqueName: \"kubernetes.io/projected/919eea43-11e7-42f0-8d23-e46e7cbc5359-kube-api-access-4c9hg\") pod \"919eea43-11e7-42f0-8d23-e46e7cbc5359\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.692754 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-ssh-key\") pod \"919eea43-11e7-42f0-8d23-e46e7cbc5359\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.693035 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-inventory\") pod \"919eea43-11e7-42f0-8d23-e46e7cbc5359\" (UID: \"919eea43-11e7-42f0-8d23-e46e7cbc5359\") " Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.698838 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/919eea43-11e7-42f0-8d23-e46e7cbc5359-kube-api-access-4c9hg" (OuterVolumeSpecName: "kube-api-access-4c9hg") pod "919eea43-11e7-42f0-8d23-e46e7cbc5359" (UID: "919eea43-11e7-42f0-8d23-e46e7cbc5359"). InnerVolumeSpecName "kube-api-access-4c9hg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.723750 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "919eea43-11e7-42f0-8d23-e46e7cbc5359" (UID: "919eea43-11e7-42f0-8d23-e46e7cbc5359"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.730137 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-inventory" (OuterVolumeSpecName: "inventory") pod "919eea43-11e7-42f0-8d23-e46e7cbc5359" (UID: "919eea43-11e7-42f0-8d23-e46e7cbc5359"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.796304 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.796380 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4c9hg\" (UniqueName: \"kubernetes.io/projected/919eea43-11e7-42f0-8d23-e46e7cbc5359-kube-api-access-4c9hg\") on node \"crc\" DevicePath \"\"" Nov 29 04:41:45 crc kubenswrapper[4631]: I1129 04:41:45.796404 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/919eea43-11e7-42f0-8d23-e46e7cbc5359-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.049722 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj" event={"ID":"919eea43-11e7-42f0-8d23-e46e7cbc5359","Type":"ContainerDied","Data":"643a659d2b76846253cb534ac3e2891cd39540bda48439193b59be93bf7243fa"} Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.049766 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="643a659d2b76846253cb534ac3e2891cd39540bda48439193b59be93bf7243fa" Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.049782 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj" Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.160828 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"] Nov 29 04:41:46 crc kubenswrapper[4631]: E1129 04:41:46.161290 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="919eea43-11e7-42f0-8d23-e46e7cbc5359" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.161311 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="919eea43-11e7-42f0-8d23-e46e7cbc5359" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.161566 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="919eea43-11e7-42f0-8d23-e46e7cbc5359" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.164391 4631 util.go:30] "No sandbox for pod can be found. 
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.167348 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.173082 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.173479 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.173488 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.183126 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"]
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.312592 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc2q9\" (UniqueName: \"kubernetes.io/projected/6cc48245-72ab-4e25-91b3-c98fe56e9869-kube-api-access-nc2q9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-26cwd\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.313030 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-26cwd\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.313249 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-26cwd\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.415518 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc2q9\" (UniqueName: \"kubernetes.io/projected/6cc48245-72ab-4e25-91b3-c98fe56e9869-kube-api-access-nc2q9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-26cwd\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.415643 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-26cwd\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.415763 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-26cwd\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.422269 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-26cwd\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.428521 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-26cwd\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.441502 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc2q9\" (UniqueName: \"kubernetes.io/projected/6cc48245-72ab-4e25-91b3-c98fe56e9869-kube-api-access-nc2q9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-26cwd\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:46 crc kubenswrapper[4631]: I1129 04:41:46.520369 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:41:47 crc kubenswrapper[4631]: I1129 04:41:47.384293 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"]
Nov 29 04:41:48 crc kubenswrapper[4631]: I1129 04:41:48.298916 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd" event={"ID":"6cc48245-72ab-4e25-91b3-c98fe56e9869","Type":"ContainerStarted","Data":"8e4fb4d0d7c3b87ae219b445554a7d882300405e9547ecd7dafd999959499855"}
Nov 29 04:41:49 crc kubenswrapper[4631]: I1129 04:41:49.310094 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd" event={"ID":"6cc48245-72ab-4e25-91b3-c98fe56e9869","Type":"ContainerStarted","Data":"8804392f282212cd02437e1dfd84dcb8928401cebea0950b76b3ef141b5094da"}
Nov 29 04:41:49 crc kubenswrapper[4631]: I1129 04:41:49.341404 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd" podStartSLOduration=2.711798934 podStartE2EDuration="3.341381959s" podCreationTimestamp="2025-11-29 04:41:46 +0000 UTC" firstStartedPulling="2025-11-29 04:41:47.393708788 +0000 UTC m=+1834.458212302" lastFinishedPulling="2025-11-29 04:41:48.023291783 +0000 UTC m=+1835.087795327" observedRunningTime="2025-11-29 04:41:49.330565014 +0000 UTC m=+1836.395068538" watchObservedRunningTime="2025-11-29 04:41:49.341381959 +0000 UTC m=+1836.405885483"
Nov 29 04:41:54 crc kubenswrapper[4631]: I1129 04:41:54.217140 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"
Nov 29 04:41:54 crc kubenswrapper[4631]: E1129 04:41:54.218434 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:42:01 crc kubenswrapper[4631]: I1129 04:42:01.745140 4631 scope.go:117] "RemoveContainer" containerID="ae7593444ad8c1b38ef9f29f4c11f00e074a6146749f2c448e6ca53e4d84d033"
Nov 29 04:42:01 crc kubenswrapper[4631]: I1129 04:42:01.800155 4631 scope.go:117] "RemoveContainer" containerID="d30c2c6a7b2349da3e750e196662bfa5b0180b9be3c541dc573adad9713b9cdb"
Nov 29 04:42:01 crc kubenswrapper[4631]: I1129 04:42:01.860135 4631 scope.go:117] "RemoveContainer" containerID="9bdb5e8368f5ef2303ceaf6ab54c3259104ac54a2908cf05b3fff429ecb41a28"
Nov 29 04:42:01 crc kubenswrapper[4631]: I1129 04:42:01.937458 4631 scope.go:117] "RemoveContainer" containerID="bc62497bf619cacb0fdf8dcd7974505a74ab66a52fdbe5b93e3b60a31820e5e5"
Nov 29 04:42:07 crc kubenswrapper[4631]: I1129 04:42:07.217020 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"
Nov 29 04:42:07 crc kubenswrapper[4631]: E1129 04:42:07.218075 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.076232 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-aa70-account-create-update-rwxwf"]
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.093227 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-s6nv9"]
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.104556 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-aa70-account-create-update-rwxwf"]
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.114253 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-2d74-account-create-update-r2knb"]
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.123251 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-2d74-account-create-update-r2knb"]
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.129130 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-s6nv9"]
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.134988 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-pv5l9"]
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.140637 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-pv5l9"]
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.237984 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e666e44-54c6-4b2d-9181-16f640203eff" path="/var/lib/kubelet/pods/3e666e44-54c6-4b2d-9181-16f640203eff/volumes"
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.239543 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55e05641-c7df-46fe-9a80-58539d8980ef" path="/var/lib/kubelet/pods/55e05641-c7df-46fe-9a80-58539d8980ef/volumes"
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.240593 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8591303e-f15b-46c9-bdef-47d5d4cdde4e" path="/var/lib/kubelet/pods/8591303e-f15b-46c9-bdef-47d5d4cdde4e/volumes"
Nov 29 04:42:19 crc kubenswrapper[4631]: I1129 04:42:19.241631 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c145c685-b279-4265-b673-a3b6e95fcf38" path="/var/lib/kubelet/pods/c145c685-b279-4265-b673-a3b6e95fcf38/volumes"
Nov 29 04:42:20 crc kubenswrapper[4631]: I1129 04:42:20.043114 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-5551-account-create-update-7cf8w"]
Nov 29 04:42:20 crc kubenswrapper[4631]: I1129 04:42:20.057452 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-b4692"]
Nov 29 04:42:20 crc kubenswrapper[4631]: I1129 04:42:20.064259 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-5551-account-create-update-7cf8w"]
Nov 29 04:42:20 crc kubenswrapper[4631]: I1129 04:42:20.070128 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-b4692"]
Nov 29 04:42:21 crc kubenswrapper[4631]: I1129 04:42:21.217801 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"
Nov 29 04:42:21 crc kubenswrapper[4631]: E1129 04:42:21.218645 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:42:21 crc kubenswrapper[4631]: I1129 04:42:21.240232 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5607b995-0a8c-43a8-b5f7-116f11d800a8" path="/var/lib/kubelet/pods/5607b995-0a8c-43a8-b5f7-116f11d800a8/volumes"
Nov 29 04:42:21 crc kubenswrapper[4631]: I1129 04:42:21.241469 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71733be1-1b42-43f0-8154-997d8b5f800f" path="/var/lib/kubelet/pods/71733be1-1b42-43f0-8154-997d8b5f800f/volumes"
Nov 29 04:42:34 crc kubenswrapper[4631]: I1129 04:42:34.217678 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"
Nov 29 04:42:34 crc kubenswrapper[4631]: E1129 04:42:34.218578 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:42:45 crc kubenswrapper[4631]: I1129 04:42:45.216187 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"
Nov 29 04:42:45 crc kubenswrapper[4631]: E1129 04:42:45.216967 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:42:53 crc kubenswrapper[4631]: I1129 04:42:53.084007 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cb52v"]
Nov 29 04:42:53 crc kubenswrapper[4631]: I1129 04:42:53.105416 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-cb52v"]
Nov 29 04:42:53 crc kubenswrapper[4631]: I1129 04:42:53.230485 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4a6498b-07c7-4d19-b5a3-49773fa023a7" path="/var/lib/kubelet/pods/e4a6498b-07c7-4d19-b5a3-49773fa023a7/volumes"
Nov 29 04:43:00 crc kubenswrapper[4631]: I1129 04:43:00.216636 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"
Nov 29 04:43:00 crc kubenswrapper[4631]: E1129 04:43:00.217707 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:43:02 crc kubenswrapper[4631]: I1129 04:43:02.069418 4631 scope.go:117] "RemoveContainer" containerID="fcb5e613cb6880f4d89e4f7392aebe0b2a0c42956d2ec100965073726021bad2"
Nov 29 04:43:02 crc kubenswrapper[4631]: I1129 04:43:02.141324 4631 scope.go:117] "RemoveContainer" containerID="f4ba32e188f4697a94b502e4e6a9a68b09e9a7c5abaf290d07ad717f6b6c0345"
Nov 29 04:43:02 crc kubenswrapper[4631]: I1129 04:43:02.182565 4631 scope.go:117] "RemoveContainer" containerID="b33b30a8200e4e8a6ad802cb260e256ac69f6c85bcd74b142d3de83bfa2e6199"
Nov 29 04:43:02 crc kubenswrapper[4631]: I1129 04:43:02.237190 4631 scope.go:117] "RemoveContainer" containerID="d46e8493d02ad1167e29938e31d0b1634a0c3d0cb1c153a7aa0094bcab49b8dd"
Nov 29 04:43:02 crc kubenswrapper[4631]: I1129 04:43:02.266419 4631 scope.go:117] "RemoveContainer" containerID="100c6e5c68f68bdeefe2f7b5ce33c8a3b8ac9bb9df2b102c7cfda3bac6b9b029"
Nov 29 04:43:02 crc kubenswrapper[4631]: I1129 04:43:02.320764 4631 scope.go:117] "RemoveContainer" containerID="86452593e1b36b12ca2c66a4cc29f596766a4e6d152d29c4a7313de5055be64f"
Nov 29 04:43:02 crc kubenswrapper[4631]: I1129 04:43:02.354262 4631 scope.go:117] "RemoveContainer" containerID="523a9b297b5cf53c00f0e2ae87ca875539ed5c6ce7e3ca78ae7bdce709e2d2b8"
Nov 29 04:43:08 crc kubenswrapper[4631]: I1129 04:43:08.193077 4631 generic.go:334] "Generic (PLEG): container finished" podID="6cc48245-72ab-4e25-91b3-c98fe56e9869" containerID="8804392f282212cd02437e1dfd84dcb8928401cebea0950b76b3ef141b5094da" exitCode=0
Nov 29 04:43:08 crc kubenswrapper[4631]: I1129 04:43:08.193208 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd" event={"ID":"6cc48245-72ab-4e25-91b3-c98fe56e9869","Type":"ContainerDied","Data":"8804392f282212cd02437e1dfd84dcb8928401cebea0950b76b3ef141b5094da"}
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.782140 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.894044 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nc2q9\" (UniqueName: \"kubernetes.io/projected/6cc48245-72ab-4e25-91b3-c98fe56e9869-kube-api-access-nc2q9\") pod \"6cc48245-72ab-4e25-91b3-c98fe56e9869\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") "
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.894110 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-ssh-key\") pod \"6cc48245-72ab-4e25-91b3-c98fe56e9869\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") "
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.894482 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-inventory\") pod \"6cc48245-72ab-4e25-91b3-c98fe56e9869\" (UID: \"6cc48245-72ab-4e25-91b3-c98fe56e9869\") "
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.903087 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cc48245-72ab-4e25-91b3-c98fe56e9869-kube-api-access-nc2q9" (OuterVolumeSpecName: "kube-api-access-nc2q9") pod "6cc48245-72ab-4e25-91b3-c98fe56e9869" (UID: "6cc48245-72ab-4e25-91b3-c98fe56e9869"). InnerVolumeSpecName "kube-api-access-nc2q9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.928962 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-inventory" (OuterVolumeSpecName: "inventory") pod "6cc48245-72ab-4e25-91b3-c98fe56e9869" (UID: "6cc48245-72ab-4e25-91b3-c98fe56e9869"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.940774 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6cc48245-72ab-4e25-91b3-c98fe56e9869" (UID: "6cc48245-72ab-4e25-91b3-c98fe56e9869"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.998373 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-inventory\") on node \"crc\" DevicePath \"\""
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.998448 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nc2q9\" (UniqueName: \"kubernetes.io/projected/6cc48245-72ab-4e25-91b3-c98fe56e9869-kube-api-access-nc2q9\") on node \"crc\" DevicePath \"\""
Nov 29 04:43:09 crc kubenswrapper[4631]: I1129 04:43:09.998478 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6cc48245-72ab-4e25-91b3-c98fe56e9869-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.218440 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd" event={"ID":"6cc48245-72ab-4e25-91b3-c98fe56e9869","Type":"ContainerDied","Data":"8e4fb4d0d7c3b87ae219b445554a7d882300405e9547ecd7dafd999959499855"}
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.218477 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e4fb4d0d7c3b87ae219b445554a7d882300405e9547ecd7dafd999959499855"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.218521 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-26cwd"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.331285 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"]
Nov 29 04:43:10 crc kubenswrapper[4631]: E1129 04:43:10.332805 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cc48245-72ab-4e25-91b3-c98fe56e9869" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.332822 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cc48245-72ab-4e25-91b3-c98fe56e9869" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.333017 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cc48245-72ab-4e25-91b3-c98fe56e9869" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.333568 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.336269 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.337148 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.337580 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.339122 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.359269 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"]
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.510149 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.510223 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.510249 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdzmt\" (UniqueName: \"kubernetes.io/projected/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-kube-api-access-wdzmt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.613151 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.613242 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdzmt\" (UniqueName: \"kubernetes.io/projected/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-kube-api-access-wdzmt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.613358 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.618252 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.622176 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.644351 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdzmt\" (UniqueName: \"kubernetes.io/projected/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-kube-api-access-wdzmt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:10 crc kubenswrapper[4631]: I1129 04:43:10.650079 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"
Nov 29 04:43:11 crc kubenswrapper[4631]: I1129 04:43:11.011322 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn"]
Nov 29 04:43:11 crc kubenswrapper[4631]: I1129 04:43:11.233144 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn" event={"ID":"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93","Type":"ContainerStarted","Data":"7ac7bab001bcb876306b57ec1ff62ac88260b5f2a84e77240ec1ccff48ed8979"}
Nov 29 04:43:12 crc kubenswrapper[4631]: I1129 04:43:12.243563 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn" event={"ID":"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93","Type":"ContainerStarted","Data":"161a24f494fd5f57d2c77db24ccc4260818ca40dec9027e7d933dafffbbba5f9"}
Nov 29 04:43:13 crc kubenswrapper[4631]: I1129 04:43:13.223634 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2"
Nov 29 04:43:13 crc kubenswrapper[4631]: E1129 04:43:13.224076 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:43:18 crc kubenswrapper[4631]: I1129 04:43:18.346772 4631 generic.go:334] "Generic (PLEG): container finished" podID="b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93" containerID="161a24f494fd5f57d2c77db24ccc4260818ca40dec9027e7d933dafffbbba5f9" exitCode=0
Nov 29 04:43:18 crc
kubenswrapper[4631]: I1129 04:43:18.346859 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn" event={"ID":"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93","Type":"ContainerDied","Data":"161a24f494fd5f57d2c77db24ccc4260818ca40dec9027e7d933dafffbbba5f9"} Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.788836 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn" Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.825052 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-inventory\") pod \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.825393 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-ssh-key\") pod \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.825508 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdzmt\" (UniqueName: \"kubernetes.io/projected/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-kube-api-access-wdzmt\") pod \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\" (UID: \"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93\") " Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.831101 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-kube-api-access-wdzmt" (OuterVolumeSpecName: "kube-api-access-wdzmt") pod "b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93" (UID: "b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93"). InnerVolumeSpecName "kube-api-access-wdzmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.854737 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-inventory" (OuterVolumeSpecName: "inventory") pod "b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93" (UID: "b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.860480 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93" (UID: "b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.928144 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdzmt\" (UniqueName: \"kubernetes.io/projected/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-kube-api-access-wdzmt\") on node \"crc\" DevicePath \"\"" Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.928189 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:43:19 crc kubenswrapper[4631]: I1129 04:43:19.928209 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.366990 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn" event={"ID":"b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93","Type":"ContainerDied","Data":"7ac7bab001bcb876306b57ec1ff62ac88260b5f2a84e77240ec1ccff48ed8979"} Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.367048 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ac7bab001bcb876306b57ec1ff62ac88260b5f2a84e77240ec1ccff48ed8979" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.367150 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.462550 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz"] Nov 29 04:43:20 crc kubenswrapper[4631]: E1129 04:43:20.462906 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.462925 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.463089 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.463684 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.465661 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.469357 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.471673 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.472400 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.480915 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz"] Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.538886 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6jptz\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.538956 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6jptz\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.539101 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2kzn\" (UniqueName: \"kubernetes.io/projected/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-kube-api-access-v2kzn\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6jptz\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.640578 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2kzn\" (UniqueName: \"kubernetes.io/projected/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-kube-api-access-v2kzn\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6jptz\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.640750 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6jptz\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.640773 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6jptz\" (UID: 
\"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.645726 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6jptz\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.650305 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6jptz\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.661896 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2kzn\" (UniqueName: \"kubernetes.io/projected/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-kube-api-access-v2kzn\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6jptz\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:20 crc kubenswrapper[4631]: I1129 04:43:20.778102 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:43:21 crc kubenswrapper[4631]: I1129 04:43:21.391271 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz"] Nov 29 04:43:22 crc kubenswrapper[4631]: I1129 04:43:22.402358 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" event={"ID":"cf5098d8-d84f-4749-87ad-6772a3ac8b4e","Type":"ContainerStarted","Data":"73ad0a975f99dbfb7039825bec15638f3a889a7ee27f3d4089dff9a781853fdf"} Nov 29 04:43:22 crc kubenswrapper[4631]: I1129 04:43:22.402667 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" event={"ID":"cf5098d8-d84f-4749-87ad-6772a3ac8b4e","Type":"ContainerStarted","Data":"9f14c0081388753ddb8eed544dc7c89304c87b5c812b0a105c7ec4890d6c6fb1"} Nov 29 04:43:22 crc kubenswrapper[4631]: I1129 04:43:22.429319 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" podStartSLOduration=1.9539202900000001 podStartE2EDuration="2.42929745s" podCreationTimestamp="2025-11-29 04:43:20 +0000 UTC" firstStartedPulling="2025-11-29 04:43:21.389945916 +0000 UTC m=+1928.454449440" lastFinishedPulling="2025-11-29 04:43:21.865323086 +0000 UTC m=+1928.929826600" observedRunningTime="2025-11-29 04:43:22.427990608 +0000 UTC m=+1929.492494162" watchObservedRunningTime="2025-11-29 04:43:22.42929745 +0000 UTC m=+1929.493801004" Nov 29 04:43:23 crc kubenswrapper[4631]: I1129 04:43:23.051502 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-4vzq5"] Nov 29 04:43:23 crc kubenswrapper[4631]: I1129 04:43:23.063004 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-4vzq5"] Nov 29 04:43:23 crc kubenswrapper[4631]: I1129 04:43:23.230963 4631 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="7da671c0-0a91-41f8-9c7c-a128b5f080d4" path="/var/lib/kubelet/pods/7da671c0-0a91-41f8-9c7c-a128b5f080d4/volumes" Nov 29 04:43:24 crc kubenswrapper[4631]: I1129 04:43:24.216977 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:43:24 crc kubenswrapper[4631]: E1129 04:43:24.217277 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:43:37 crc kubenswrapper[4631]: I1129 04:43:37.247021 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:43:37 crc kubenswrapper[4631]: E1129 04:43:37.247921 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:43:38 crc kubenswrapper[4631]: I1129 04:43:38.066930 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9mvkm"] Nov 29 04:43:38 crc kubenswrapper[4631]: I1129 04:43:38.079049 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9mvkm"] Nov 29 04:43:39 crc kubenswrapper[4631]: I1129 04:43:39.230696 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7e732a2-0a21-4bd8-af75-1ac34236fa2d" path="/var/lib/kubelet/pods/b7e732a2-0a21-4bd8-af75-1ac34236fa2d/volumes" Nov 29 04:43:52 crc kubenswrapper[4631]: I1129 04:43:52.217316 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:43:52 crc kubenswrapper[4631]: E1129 04:43:52.218326 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:43:59 crc kubenswrapper[4631]: I1129 04:43:59.052001 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-kbfj8"] Nov 29 04:43:59 crc kubenswrapper[4631]: I1129 04:43:59.057896 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-kbfj8"] Nov 29 04:43:59 crc kubenswrapper[4631]: I1129 04:43:59.227975 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7877ec7-2f8b-4482-9a82-7a37e8c0ad47" path="/var/lib/kubelet/pods/a7877ec7-2f8b-4482-9a82-7a37e8c0ad47/volumes" Nov 29 04:44:02 crc kubenswrapper[4631]: I1129 04:44:02.515832 4631 scope.go:117] "RemoveContainer" containerID="9358c3a7d53991a3e5272bf14397fb1d5550a01dd56b7b2b7cfc9f7a881b0bfe" Nov 29 04:44:02 crc kubenswrapper[4631]: 
I1129 04:44:02.568539 4631 scope.go:117] "RemoveContainer" containerID="aa6e96e21d1993f9690106968d034fd343e937c4e57d7712794aec9ed5083386" Nov 29 04:44:02 crc kubenswrapper[4631]: I1129 04:44:02.671116 4631 scope.go:117] "RemoveContainer" containerID="5cf5559bf9fe3fea6ccad5f20cfebddd0d91377639ad29c9f62483d1074a032b" Nov 29 04:44:03 crc kubenswrapper[4631]: I1129 04:44:03.227613 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:44:03 crc kubenswrapper[4631]: E1129 04:44:03.228147 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:44:08 crc kubenswrapper[4631]: I1129 04:44:08.893553 4631 generic.go:334] "Generic (PLEG): container finished" podID="cf5098d8-d84f-4749-87ad-6772a3ac8b4e" containerID="73ad0a975f99dbfb7039825bec15638f3a889a7ee27f3d4089dff9a781853fdf" exitCode=0 Nov 29 04:44:08 crc kubenswrapper[4631]: I1129 04:44:08.893619 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" event={"ID":"cf5098d8-d84f-4749-87ad-6772a3ac8b4e","Type":"ContainerDied","Data":"73ad0a975f99dbfb7039825bec15638f3a889a7ee27f3d4089dff9a781853fdf"} Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.390879 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.487506 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-inventory\") pod \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.487643 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2kzn\" (UniqueName: \"kubernetes.io/projected/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-kube-api-access-v2kzn\") pod \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.487722 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-ssh-key\") pod \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\" (UID: \"cf5098d8-d84f-4749-87ad-6772a3ac8b4e\") " Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.497667 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-kube-api-access-v2kzn" (OuterVolumeSpecName: "kube-api-access-v2kzn") pod "cf5098d8-d84f-4749-87ad-6772a3ac8b4e" (UID: "cf5098d8-d84f-4749-87ad-6772a3ac8b4e"). InnerVolumeSpecName "kube-api-access-v2kzn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.520160 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-inventory" (OuterVolumeSpecName: "inventory") pod "cf5098d8-d84f-4749-87ad-6772a3ac8b4e" (UID: "cf5098d8-d84f-4749-87ad-6772a3ac8b4e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.525545 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cf5098d8-d84f-4749-87ad-6772a3ac8b4e" (UID: "cf5098d8-d84f-4749-87ad-6772a3ac8b4e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.590579 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.590630 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2kzn\" (UniqueName: \"kubernetes.io/projected/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-kube-api-access-v2kzn\") on node \"crc\" DevicePath \"\"" Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.590654 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf5098d8-d84f-4749-87ad-6772a3ac8b4e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.923456 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" event={"ID":"cf5098d8-d84f-4749-87ad-6772a3ac8b4e","Type":"ContainerDied","Data":"9f14c0081388753ddb8eed544dc7c89304c87b5c812b0a105c7ec4890d6c6fb1"} Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.923517 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f14c0081388753ddb8eed544dc7c89304c87b5c812b0a105c7ec4890d6c6fb1" Nov 29 04:44:10 crc kubenswrapper[4631]: I1129 04:44:10.923553 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6jptz" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.062184 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5"] Nov 29 04:44:11 crc kubenswrapper[4631]: E1129 04:44:11.062690 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf5098d8-d84f-4749-87ad-6772a3ac8b4e" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.062711 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf5098d8-d84f-4749-87ad-6772a3ac8b4e" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.062980 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf5098d8-d84f-4749-87ad-6772a3ac8b4e" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.063782 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.065991 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.066199 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.066324 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.067119 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.083607 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5"] Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.105220 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvqw6\" (UniqueName: \"kubernetes.io/projected/12e84ebd-9825-4ef2-9356-626fdc73dbb8-kube-api-access-xvqw6\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.105524 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.105735 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.207389 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvqw6\" (UniqueName: \"kubernetes.io/projected/12e84ebd-9825-4ef2-9356-626fdc73dbb8-kube-api-access-xvqw6\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.207485 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.207690 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5\" 
(UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.213472 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.220808 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.223782 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvqw6\" (UniqueName: \"kubernetes.io/projected/12e84ebd-9825-4ef2-9356-626fdc73dbb8-kube-api-access-xvqw6\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.378676 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:44:11 crc kubenswrapper[4631]: I1129 04:44:11.958064 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5"] Nov 29 04:44:11 crc kubenswrapper[4631]: W1129 04:44:11.963622 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12e84ebd_9825_4ef2_9356_626fdc73dbb8.slice/crio-3b822788da17feb39f6183a1c5ff428a8b488e12c1fb9de953b0ab3d8ae62db2 WatchSource:0}: Error finding container 3b822788da17feb39f6183a1c5ff428a8b488e12c1fb9de953b0ab3d8ae62db2: Status 404 returned error can't find the container with id 3b822788da17feb39f6183a1c5ff428a8b488e12c1fb9de953b0ab3d8ae62db2 Nov 29 04:44:12 crc kubenswrapper[4631]: I1129 04:44:12.950203 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" event={"ID":"12e84ebd-9825-4ef2-9356-626fdc73dbb8","Type":"ContainerStarted","Data":"058ad9f9b0541ea06df9347043c6e5b6e2dedf931c86d6eac9673cce1e33cc99"} Nov 29 04:44:12 crc kubenswrapper[4631]: I1129 04:44:12.951194 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" event={"ID":"12e84ebd-9825-4ef2-9356-626fdc73dbb8","Type":"ContainerStarted","Data":"3b822788da17feb39f6183a1c5ff428a8b488e12c1fb9de953b0ab3d8ae62db2"} Nov 29 04:44:12 crc kubenswrapper[4631]: I1129 04:44:12.983972 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" podStartSLOduration=1.444379759 podStartE2EDuration="1.983948834s" podCreationTimestamp="2025-11-29 04:44:11 +0000 UTC" firstStartedPulling="2025-11-29 04:44:11.96654943 +0000 UTC m=+1979.031052964" lastFinishedPulling="2025-11-29 04:44:12.506118485 +0000 UTC m=+1979.570622039" observedRunningTime="2025-11-29 
04:44:12.975962578 +0000 UTC m=+1980.040466142" watchObservedRunningTime="2025-11-29 04:44:12.983948834 +0000 UTC m=+1980.048452358" Nov 29 04:44:16 crc kubenswrapper[4631]: I1129 04:44:16.217011 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:44:16 crc kubenswrapper[4631]: E1129 04:44:16.218153 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.426513 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-54vg2"] Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.428786 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.432809 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-54vg2"] Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.565797 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-catalog-content\") pod \"redhat-operators-54vg2\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.566516 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-utilities\") pod \"redhat-operators-54vg2\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.566677 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rpzd\" (UniqueName: \"kubernetes.io/projected/71b89e09-9b70-4d95-a304-4b10c3400e8f-kube-api-access-8rpzd\") pod \"redhat-operators-54vg2\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.668151 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rpzd\" (UniqueName: \"kubernetes.io/projected/71b89e09-9b70-4d95-a304-4b10c3400e8f-kube-api-access-8rpzd\") pod \"redhat-operators-54vg2\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.668299 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-catalog-content\") pod \"redhat-operators-54vg2\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.668364 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-utilities\") pod \"redhat-operators-54vg2\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.668721 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-utilities\") pod \"redhat-operators-54vg2\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.668777 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-catalog-content\") pod \"redhat-operators-54vg2\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.699910 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rpzd\" (UniqueName: \"kubernetes.io/projected/71b89e09-9b70-4d95-a304-4b10c3400e8f-kube-api-access-8rpzd\") pod \"redhat-operators-54vg2\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:27 crc kubenswrapper[4631]: I1129 04:44:27.751428 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:28 crc kubenswrapper[4631]: I1129 04:44:28.178986 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-54vg2"] Nov 29 04:44:29 crc kubenswrapper[4631]: I1129 04:44:29.110100 4631 generic.go:334] "Generic (PLEG): container finished" podID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerID="d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa" exitCode=0 Nov 29 04:44:29 crc kubenswrapper[4631]: I1129 04:44:29.110181 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54vg2" event={"ID":"71b89e09-9b70-4d95-a304-4b10c3400e8f","Type":"ContainerDied","Data":"d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa"} Nov 29 04:44:29 crc kubenswrapper[4631]: I1129 04:44:29.110469 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54vg2" event={"ID":"71b89e09-9b70-4d95-a304-4b10c3400e8f","Type":"ContainerStarted","Data":"b2ed38a0b3e513b9d8cddb36dcac1cd5fbd6330dad503a4b5280822d3e76c73d"} Nov 29 04:44:29 crc kubenswrapper[4631]: I1129 04:44:29.215790 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:44:30 crc kubenswrapper[4631]: I1129 04:44:30.122556 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54vg2" event={"ID":"71b89e09-9b70-4d95-a304-4b10c3400e8f","Type":"ContainerStarted","Data":"3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa"} Nov 29 04:44:30 crc kubenswrapper[4631]: I1129 04:44:30.125556 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"91c1e60f055fd84296684a0bad54cde6b8f4cb334611b9aec94a6b3673703fab"} Nov 29 04:44:33 crc kubenswrapper[4631]: I1129 04:44:33.158406 4631 generic.go:334] "Generic (PLEG): 
container finished" podID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerID="3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa" exitCode=0 Nov 29 04:44:33 crc kubenswrapper[4631]: I1129 04:44:33.158486 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54vg2" event={"ID":"71b89e09-9b70-4d95-a304-4b10c3400e8f","Type":"ContainerDied","Data":"3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa"} Nov 29 04:44:35 crc kubenswrapper[4631]: I1129 04:44:35.178640 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54vg2" event={"ID":"71b89e09-9b70-4d95-a304-4b10c3400e8f","Type":"ContainerStarted","Data":"25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c"} Nov 29 04:44:35 crc kubenswrapper[4631]: I1129 04:44:35.218180 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-54vg2" podStartSLOduration=2.7493608480000002 podStartE2EDuration="8.218165545s" podCreationTimestamp="2025-11-29 04:44:27 +0000 UTC" firstStartedPulling="2025-11-29 04:44:29.11191815 +0000 UTC m=+1996.176421674" lastFinishedPulling="2025-11-29 04:44:34.580722857 +0000 UTC m=+2001.645226371" observedRunningTime="2025-11-29 04:44:35.211564663 +0000 UTC m=+2002.276068177" watchObservedRunningTime="2025-11-29 04:44:35.218165545 +0000 UTC m=+2002.282669049" Nov 29 04:44:37 crc kubenswrapper[4631]: I1129 04:44:37.752469 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:37 crc kubenswrapper[4631]: I1129 04:44:37.753129 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:38 crc kubenswrapper[4631]: I1129 04:44:38.824150 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-54vg2" podUID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerName="registry-server" probeResult="failure" output=< Nov 29 04:44:38 crc kubenswrapper[4631]: timeout: failed to connect service ":50051" within 1s Nov 29 04:44:38 crc kubenswrapper[4631]: > Nov 29 04:44:48 crc kubenswrapper[4631]: I1129 04:44:48.080782 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:48 crc kubenswrapper[4631]: I1129 04:44:48.145725 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:48 crc kubenswrapper[4631]: I1129 04:44:48.312247 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-54vg2"] Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.044665 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-54vg2" podUID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerName="registry-server" containerID="cri-o://25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c" gracePeriod=2 Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.576661 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.742865 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-catalog-content\") pod \"71b89e09-9b70-4d95-a304-4b10c3400e8f\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.743058 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rpzd\" (UniqueName: \"kubernetes.io/projected/71b89e09-9b70-4d95-a304-4b10c3400e8f-kube-api-access-8rpzd\") pod \"71b89e09-9b70-4d95-a304-4b10c3400e8f\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.743116 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-utilities\") pod \"71b89e09-9b70-4d95-a304-4b10c3400e8f\" (UID: \"71b89e09-9b70-4d95-a304-4b10c3400e8f\") " Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.744855 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-utilities" (OuterVolumeSpecName: "utilities") pod "71b89e09-9b70-4d95-a304-4b10c3400e8f" (UID: "71b89e09-9b70-4d95-a304-4b10c3400e8f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.745386 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.749722 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71b89e09-9b70-4d95-a304-4b10c3400e8f-kube-api-access-8rpzd" (OuterVolumeSpecName: "kube-api-access-8rpzd") pod "71b89e09-9b70-4d95-a304-4b10c3400e8f" (UID: "71b89e09-9b70-4d95-a304-4b10c3400e8f"). InnerVolumeSpecName "kube-api-access-8rpzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.847753 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rpzd\" (UniqueName: \"kubernetes.io/projected/71b89e09-9b70-4d95-a304-4b10c3400e8f-kube-api-access-8rpzd\") on node \"crc\" DevicePath \"\"" Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.875829 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "71b89e09-9b70-4d95-a304-4b10c3400e8f" (UID: "71b89e09-9b70-4d95-a304-4b10c3400e8f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:44:50 crc kubenswrapper[4631]: I1129 04:44:50.950016 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71b89e09-9b70-4d95-a304-4b10c3400e8f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.059449 4631 generic.go:334] "Generic (PLEG): container finished" podID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerID="25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c" exitCode=0 Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.059486 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54vg2" event={"ID":"71b89e09-9b70-4d95-a304-4b10c3400e8f","Type":"ContainerDied","Data":"25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c"} Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.059559 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-54vg2" event={"ID":"71b89e09-9b70-4d95-a304-4b10c3400e8f","Type":"ContainerDied","Data":"b2ed38a0b3e513b9d8cddb36dcac1cd5fbd6330dad503a4b5280822d3e76c73d"} Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.059582 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-54vg2" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.059589 4631 scope.go:117] "RemoveContainer" containerID="25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.086427 4631 scope.go:117] "RemoveContainer" containerID="3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.119918 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-54vg2"] Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.129528 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-54vg2"] Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.149859 4631 scope.go:117] "RemoveContainer" containerID="d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.171751 4631 scope.go:117] "RemoveContainer" containerID="25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c" Nov 29 04:44:51 crc kubenswrapper[4631]: E1129 04:44:51.172150 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c\": container with ID starting with 25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c not found: ID does not exist" containerID="25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.172196 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c"} err="failed to get container status \"25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c\": rpc error: code = NotFound desc = could not find container \"25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c\": container with ID starting with 25a255c05f75e1f7a48675421b7045defbdbde631373fcd60a34efe700b01c2c not found: ID does not exist" Nov 29 04:44:51 crc 
kubenswrapper[4631]: I1129 04:44:51.172222 4631 scope.go:117] "RemoveContainer" containerID="3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa" Nov 29 04:44:51 crc kubenswrapper[4631]: E1129 04:44:51.172733 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa\": container with ID starting with 3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa not found: ID does not exist" containerID="3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.172765 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa"} err="failed to get container status \"3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa\": rpc error: code = NotFound desc = could not find container \"3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa\": container with ID starting with 3f601b03ef0cbaff9a02d3ed12a07144c01d383336028d9b7935348368fa58aa not found: ID does not exist" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.172808 4631 scope.go:117] "RemoveContainer" containerID="d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa" Nov 29 04:44:51 crc kubenswrapper[4631]: E1129 04:44:51.173038 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa\": container with ID starting with d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa not found: ID does not exist" containerID="d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.173060 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa"} err="failed to get container status \"d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa\": rpc error: code = NotFound desc = could not find container \"d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa\": container with ID starting with d2a2e175feda7c31ec692789b0a74bba090bd41d1cd82b26d82fa4d8538089fa not found: ID does not exist" Nov 29 04:44:51 crc kubenswrapper[4631]: I1129 04:44:51.230267 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71b89e09-9b70-4d95-a304-4b10c3400e8f" path="/var/lib/kubelet/pods/71b89e09-9b70-4d95-a304-4b10c3400e8f/volumes" Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.177276 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j"] Nov 29 04:45:00 crc kubenswrapper[4631]: E1129 04:45:00.178685 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerName="registry-server" Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.178711 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerName="registry-server" Nov 29 04:45:00 crc kubenswrapper[4631]: E1129 04:45:00.178749 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerName="extract-utilities" Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 
Nov 29 04:45:00 crc kubenswrapper[4631]: E1129 04:45:00.178797 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerName="extract-content"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.178814 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerName="extract-content"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.179210 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="71b89e09-9b70-4d95-a304-4b10c3400e8f" containerName="registry-server"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.180615 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.185410 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.191840 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.192682 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j"]
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.283924 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-secret-volume\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.284126 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-config-volume\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.284201 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k4fw\" (UniqueName: \"kubernetes.io/projected/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-kube-api-access-6k4fw\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.385762 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-config-volume\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j"
Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.385840 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k4fw\" (UniqueName: \"kubernetes.io/projected/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-kube-api-access-6k4fw\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j"
\"kubernetes.io/projected/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-kube-api-access-6k4fw\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.386233 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-secret-volume\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.387227 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-config-volume\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.396694 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-secret-volume\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.402568 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k4fw\" (UniqueName: \"kubernetes.io/projected/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-kube-api-access-6k4fw\") pod \"collect-profiles-29406525-rjv2j\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" Nov 29 04:45:00 crc kubenswrapper[4631]: I1129 04:45:00.566416 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" Nov 29 04:45:01 crc kubenswrapper[4631]: I1129 04:45:01.040349 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j"] Nov 29 04:45:01 crc kubenswrapper[4631]: W1129 04:45:01.051245 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e144e30_1f4b_4fbf_b6d6_2b77338d8d6b.slice/crio-d92e8a59db871dba2bb31089c9609f0184da45eb60c648f868a855edff2d126c WatchSource:0}: Error finding container d92e8a59db871dba2bb31089c9609f0184da45eb60c648f868a855edff2d126c: Status 404 returned error can't find the container with id d92e8a59db871dba2bb31089c9609f0184da45eb60c648f868a855edff2d126c Nov 29 04:45:01 crc kubenswrapper[4631]: I1129 04:45:01.162778 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" event={"ID":"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b","Type":"ContainerStarted","Data":"d92e8a59db871dba2bb31089c9609f0184da45eb60c648f868a855edff2d126c"} Nov 29 04:45:02 crc kubenswrapper[4631]: I1129 04:45:02.177405 4631 generic.go:334] "Generic (PLEG): container finished" podID="5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b" containerID="81285e0c2f63ebbc28f0fc083702ebafb588be970a8d35e1adeae5a2eb57cd98" exitCode=0 Nov 29 04:45:02 crc kubenswrapper[4631]: I1129 04:45:02.177535 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" event={"ID":"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b","Type":"ContainerDied","Data":"81285e0c2f63ebbc28f0fc083702ebafb588be970a8d35e1adeae5a2eb57cd98"} Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.609734 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.755441 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k4fw\" (UniqueName: \"kubernetes.io/projected/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-kube-api-access-6k4fw\") pod \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.755733 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-config-volume\") pod \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.755978 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-secret-volume\") pod \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\" (UID: \"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b\") " Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.756417 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-config-volume" (OuterVolumeSpecName: "config-volume") pod "5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b" (UID: "5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.761646 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-kube-api-access-6k4fw" (OuterVolumeSpecName: "kube-api-access-6k4fw") pod "5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b" (UID: "5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b"). InnerVolumeSpecName "kube-api-access-6k4fw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.768523 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b" (UID: "5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.858734 4631 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.858794 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k4fw\" (UniqueName: \"kubernetes.io/projected/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-kube-api-access-6k4fw\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:03 crc kubenswrapper[4631]: I1129 04:45:03.858806 4631 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:04 crc kubenswrapper[4631]: I1129 04:45:04.200367 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" event={"ID":"5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b","Type":"ContainerDied","Data":"d92e8a59db871dba2bb31089c9609f0184da45eb60c648f868a855edff2d126c"} Nov 29 04:45:04 crc kubenswrapper[4631]: I1129 04:45:04.200415 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d92e8a59db871dba2bb31089c9609f0184da45eb60c648f868a855edff2d126c" Nov 29 04:45:04 crc kubenswrapper[4631]: I1129 04:45:04.200495 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406525-rjv2j" Nov 29 04:45:04 crc kubenswrapper[4631]: I1129 04:45:04.692903 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf"] Nov 29 04:45:04 crc kubenswrapper[4631]: I1129 04:45:04.700676 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406480-7l9nf"] Nov 29 04:45:05 crc kubenswrapper[4631]: I1129 04:45:05.231298 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78552157-0b5f-437a-988a-71805a812ab2" path="/var/lib/kubelet/pods/78552157-0b5f-437a-988a-71805a812ab2/volumes" Nov 29 04:45:19 crc kubenswrapper[4631]: I1129 04:45:19.365737 4631 generic.go:334] "Generic (PLEG): container finished" podID="12e84ebd-9825-4ef2-9356-626fdc73dbb8" containerID="058ad9f9b0541ea06df9347043c6e5b6e2dedf931c86d6eac9673cce1e33cc99" exitCode=0 Nov 29 04:45:19 crc kubenswrapper[4631]: I1129 04:45:19.365810 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" event={"ID":"12e84ebd-9825-4ef2-9356-626fdc73dbb8","Type":"ContainerDied","Data":"058ad9f9b0541ea06df9347043c6e5b6e2dedf931c86d6eac9673cce1e33cc99"} Nov 29 04:45:20 crc kubenswrapper[4631]: I1129 04:45:20.869973 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:45:20 crc kubenswrapper[4631]: I1129 04:45:20.993853 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-inventory\") pod \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " Nov 29 04:45:20 crc kubenswrapper[4631]: I1129 04:45:20.993969 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvqw6\" (UniqueName: \"kubernetes.io/projected/12e84ebd-9825-4ef2-9356-626fdc73dbb8-kube-api-access-xvqw6\") pod \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " Nov 29 04:45:20 crc kubenswrapper[4631]: I1129 04:45:20.994071 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-ssh-key\") pod \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\" (UID: \"12e84ebd-9825-4ef2-9356-626fdc73dbb8\") " Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.017878 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12e84ebd-9825-4ef2-9356-626fdc73dbb8-kube-api-access-xvqw6" (OuterVolumeSpecName: "kube-api-access-xvqw6") pod "12e84ebd-9825-4ef2-9356-626fdc73dbb8" (UID: "12e84ebd-9825-4ef2-9356-626fdc73dbb8"). InnerVolumeSpecName "kube-api-access-xvqw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.022407 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-inventory" (OuterVolumeSpecName: "inventory") pod "12e84ebd-9825-4ef2-9356-626fdc73dbb8" (UID: "12e84ebd-9825-4ef2-9356-626fdc73dbb8"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.045802 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "12e84ebd-9825-4ef2-9356-626fdc73dbb8" (UID: "12e84ebd-9825-4ef2-9356-626fdc73dbb8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.095983 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvqw6\" (UniqueName: \"kubernetes.io/projected/12e84ebd-9825-4ef2-9356-626fdc73dbb8-kube-api-access-xvqw6\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.096018 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.096030 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/12e84ebd-9825-4ef2-9356-626fdc73dbb8-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.389772 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" event={"ID":"12e84ebd-9825-4ef2-9356-626fdc73dbb8","Type":"ContainerDied","Data":"3b822788da17feb39f6183a1c5ff428a8b488e12c1fb9de953b0ab3d8ae62db2"} Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.390017 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b822788da17feb39f6183a1c5ff428a8b488e12c1fb9de953b0ab3d8ae62db2" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.389861 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.555589 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-r2trh"] Nov 29 04:45:21 crc kubenswrapper[4631]: E1129 04:45:21.555932 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b" containerName="collect-profiles" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.555948 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b" containerName="collect-profiles" Nov 29 04:45:21 crc kubenswrapper[4631]: E1129 04:45:21.555967 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12e84ebd-9825-4ef2-9356-626fdc73dbb8" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.555974 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="12e84ebd-9825-4ef2-9356-626fdc73dbb8" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.556129 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e144e30-1f4b-4fbf-b6d6-2b77338d8d6b" containerName="collect-profiles" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.556144 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="12e84ebd-9825-4ef2-9356-626fdc73dbb8" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.556856 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.559628 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.560999 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.563350 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.564191 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.572128 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-r2trh"] Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.603941 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-r2trh\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.604306 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-r2trh\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" Nov 29 04:45:21 crc 
Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.706955 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-976xf\" (UniqueName: \"kubernetes.io/projected/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-kube-api-access-976xf\") pod \"ssh-known-hosts-edpm-deployment-r2trh\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " pod="openstack/ssh-known-hosts-edpm-deployment-r2trh"
Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.707109 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-r2trh\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " pod="openstack/ssh-known-hosts-edpm-deployment-r2trh"
Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.707225 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-r2trh\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " pod="openstack/ssh-known-hosts-edpm-deployment-r2trh"
Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.714477 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-r2trh\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " pod="openstack/ssh-known-hosts-edpm-deployment-r2trh"
Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.719510 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-r2trh\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " pod="openstack/ssh-known-hosts-edpm-deployment-r2trh"
Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.741645 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-976xf\" (UniqueName: \"kubernetes.io/projected/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-kube-api-access-976xf\") pod \"ssh-known-hosts-edpm-deployment-r2trh\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " pod="openstack/ssh-known-hosts-edpm-deployment-r2trh"
Nov 29 04:45:21 crc kubenswrapper[4631]: I1129 04:45:21.872957 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh"
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" Nov 29 04:45:22 crc kubenswrapper[4631]: I1129 04:45:22.189796 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-r2trh"] Nov 29 04:45:22 crc kubenswrapper[4631]: I1129 04:45:22.200991 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 04:45:22 crc kubenswrapper[4631]: I1129 04:45:22.401954 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" event={"ID":"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d","Type":"ContainerStarted","Data":"f69cbe4c6ccfed70decbaa31ec9e548d8e7c211cb7a08571e69cf842db98a138"} Nov 29 04:45:23 crc kubenswrapper[4631]: I1129 04:45:23.423422 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" event={"ID":"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d","Type":"ContainerStarted","Data":"7c8d0bc2f05afff43926183bb81680373fbd7b5b96e520f2c18db3c600b4266b"} Nov 29 04:45:23 crc kubenswrapper[4631]: I1129 04:45:23.453392 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" podStartSLOduration=1.774274546 podStartE2EDuration="2.453366969s" podCreationTimestamp="2025-11-29 04:45:21 +0000 UTC" firstStartedPulling="2025-11-29 04:45:22.200767365 +0000 UTC m=+2049.265270879" lastFinishedPulling="2025-11-29 04:45:22.879859758 +0000 UTC m=+2049.944363302" observedRunningTime="2025-11-29 04:45:23.4432108 +0000 UTC m=+2050.507714324" watchObservedRunningTime="2025-11-29 04:45:23.453366969 +0000 UTC m=+2050.517870513" Nov 29 04:45:31 crc kubenswrapper[4631]: I1129 04:45:31.515543 4631 generic.go:334] "Generic (PLEG): container finished" podID="25a7e95d-1575-46e8-9ab3-a21aa8f08b3d" containerID="7c8d0bc2f05afff43926183bb81680373fbd7b5b96e520f2c18db3c600b4266b" exitCode=0 Nov 29 04:45:31 crc kubenswrapper[4631]: I1129 04:45:31.515668 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" event={"ID":"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d","Type":"ContainerDied","Data":"7c8d0bc2f05afff43926183bb81680373fbd7b5b96e520f2c18db3c600b4266b"} Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.049608 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.165650 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-inventory-0\") pod \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.165800 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-ssh-key-openstack-edpm-ipam\") pod \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.165843 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-976xf\" (UniqueName: \"kubernetes.io/projected/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-kube-api-access-976xf\") pod \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\" (UID: \"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d\") " Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.172078 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-kube-api-access-976xf" (OuterVolumeSpecName: "kube-api-access-976xf") pod "25a7e95d-1575-46e8-9ab3-a21aa8f08b3d" (UID: "25a7e95d-1575-46e8-9ab3-a21aa8f08b3d"). InnerVolumeSpecName "kube-api-access-976xf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.206296 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "25a7e95d-1575-46e8-9ab3-a21aa8f08b3d" (UID: "25a7e95d-1575-46e8-9ab3-a21aa8f08b3d"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.218549 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "25a7e95d-1575-46e8-9ab3-a21aa8f08b3d" (UID: "25a7e95d-1575-46e8-9ab3-a21aa8f08b3d"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.269457 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.269700 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-976xf\" (UniqueName: \"kubernetes.io/projected/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-kube-api-access-976xf\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.269937 4631 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/25a7e95d-1575-46e8-9ab3-a21aa8f08b3d-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.543005 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" event={"ID":"25a7e95d-1575-46e8-9ab3-a21aa8f08b3d","Type":"ContainerDied","Data":"f69cbe4c6ccfed70decbaa31ec9e548d8e7c211cb7a08571e69cf842db98a138"} Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.543043 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f69cbe4c6ccfed70decbaa31ec9e548d8e7c211cb7a08571e69cf842db98a138" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.543109 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-r2trh" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.682467 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr"] Nov 29 04:45:33 crc kubenswrapper[4631]: E1129 04:45:33.683139 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25a7e95d-1575-46e8-9ab3-a21aa8f08b3d" containerName="ssh-known-hosts-edpm-deployment" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.683169 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="25a7e95d-1575-46e8-9ab3-a21aa8f08b3d" containerName="ssh-known-hosts-edpm-deployment" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.683560 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="25a7e95d-1575-46e8-9ab3-a21aa8f08b3d" containerName="ssh-known-hosts-edpm-deployment" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.684655 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.687839 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.688260 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.690613 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.694615 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr"] Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.697523 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.780345 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gcswr\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.780480 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gcswr\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.780527 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p92ql\" (UniqueName: \"kubernetes.io/projected/f32f1eab-b255-4518-ba67-d9a46362d4f7-kube-api-access-p92ql\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gcswr\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.881856 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gcswr\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.882024 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gcswr\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.882087 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p92ql\" (UniqueName: \"kubernetes.io/projected/f32f1eab-b255-4518-ba67-d9a46362d4f7-kube-api-access-p92ql\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gcswr\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.886704 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gcswr\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.889835 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gcswr\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:33 crc kubenswrapper[4631]: I1129 04:45:33.912143 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p92ql\" (UniqueName: \"kubernetes.io/projected/f32f1eab-b255-4518-ba67-d9a46362d4f7-kube-api-access-p92ql\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gcswr\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:34 crc kubenswrapper[4631]: I1129 04:45:34.009023 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:34 crc kubenswrapper[4631]: I1129 04:45:34.400521 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr"] Nov 29 04:45:34 crc kubenswrapper[4631]: I1129 04:45:34.555168 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" event={"ID":"f32f1eab-b255-4518-ba67-d9a46362d4f7","Type":"ContainerStarted","Data":"cbc0d0db4eebb788cfd8073a617d4eda676a555bcc4a7d3cbad81508ba6033bd"} Nov 29 04:45:35 crc kubenswrapper[4631]: I1129 04:45:35.572784 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" event={"ID":"f32f1eab-b255-4518-ba67-d9a46362d4f7","Type":"ContainerStarted","Data":"1fda406e50ed14a660a6daf63d879b14037e3a0d9da4a68239f67569e3ee2668"} Nov 29 04:45:35 crc kubenswrapper[4631]: I1129 04:45:35.591840 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" podStartSLOduration=1.9187035620000001 podStartE2EDuration="2.591826779s" podCreationTimestamp="2025-11-29 04:45:33 +0000 UTC" firstStartedPulling="2025-11-29 04:45:34.409724606 +0000 UTC m=+2061.474228120" lastFinishedPulling="2025-11-29 04:45:35.082847783 +0000 UTC m=+2062.147351337" observedRunningTime="2025-11-29 04:45:35.586273472 +0000 UTC m=+2062.650776986" watchObservedRunningTime="2025-11-29 04:45:35.591826779 +0000 UTC m=+2062.656330293" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.315045 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vf478"] Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.320848 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.330514 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vf478"] Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.444012 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-catalog-content\") pod \"community-operators-vf478\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.444069 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-utilities\") pod \"community-operators-vf478\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.444125 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wr4hq\" (UniqueName: \"kubernetes.io/projected/e24caa99-9dd8-4573-8001-19c92fff6692-kube-api-access-wr4hq\") pod \"community-operators-vf478\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.545766 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-catalog-content\") pod \"community-operators-vf478\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.545835 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-utilities\") pod \"community-operators-vf478\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.545899 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wr4hq\" (UniqueName: \"kubernetes.io/projected/e24caa99-9dd8-4573-8001-19c92fff6692-kube-api-access-wr4hq\") pod \"community-operators-vf478\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.546575 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-catalog-content\") pod \"community-operators-vf478\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.546593 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-utilities\") pod \"community-operators-vf478\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.572360 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wr4hq\" (UniqueName: \"kubernetes.io/projected/e24caa99-9dd8-4573-8001-19c92fff6692-kube-api-access-wr4hq\") pod \"community-operators-vf478\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:36 crc kubenswrapper[4631]: I1129 04:45:36.659499 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:37 crc kubenswrapper[4631]: I1129 04:45:37.004576 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vf478"] Nov 29 04:45:37 crc kubenswrapper[4631]: I1129 04:45:37.592416 4631 generic.go:334] "Generic (PLEG): container finished" podID="e24caa99-9dd8-4573-8001-19c92fff6692" containerID="e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565" exitCode=0 Nov 29 04:45:37 crc kubenswrapper[4631]: I1129 04:45:37.592497 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vf478" event={"ID":"e24caa99-9dd8-4573-8001-19c92fff6692","Type":"ContainerDied","Data":"e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565"} Nov 29 04:45:37 crc kubenswrapper[4631]: I1129 04:45:37.592778 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vf478" event={"ID":"e24caa99-9dd8-4573-8001-19c92fff6692","Type":"ContainerStarted","Data":"07054b0cb2b61d9f218cb695502d375e2ddc49de850db64060a178ab2edf91f7"} Nov 29 04:45:39 crc kubenswrapper[4631]: I1129 04:45:39.614321 4631 generic.go:334] "Generic (PLEG): container finished" podID="e24caa99-9dd8-4573-8001-19c92fff6692" containerID="485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31" exitCode=0 Nov 29 04:45:39 crc kubenswrapper[4631]: I1129 04:45:39.614684 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vf478" event={"ID":"e24caa99-9dd8-4573-8001-19c92fff6692","Type":"ContainerDied","Data":"485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31"} Nov 29 04:45:40 crc kubenswrapper[4631]: I1129 04:45:40.625191 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vf478" event={"ID":"e24caa99-9dd8-4573-8001-19c92fff6692","Type":"ContainerStarted","Data":"f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54"} Nov 29 04:45:40 crc kubenswrapper[4631]: I1129 04:45:40.649420 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vf478" podStartSLOduration=2.114909136 podStartE2EDuration="4.649404142s" podCreationTimestamp="2025-11-29 04:45:36 +0000 UTC" firstStartedPulling="2025-11-29 04:45:37.594875358 +0000 UTC m=+2064.659378912" lastFinishedPulling="2025-11-29 04:45:40.129370364 +0000 UTC m=+2067.193873918" observedRunningTime="2025-11-29 04:45:40.645288201 +0000 UTC m=+2067.709791715" watchObservedRunningTime="2025-11-29 04:45:40.649404142 +0000 UTC m=+2067.713907656" Nov 29 04:45:44 crc kubenswrapper[4631]: I1129 04:45:44.672866 4631 generic.go:334] "Generic (PLEG): container finished" podID="f32f1eab-b255-4518-ba67-d9a46362d4f7" containerID="1fda406e50ed14a660a6daf63d879b14037e3a0d9da4a68239f67569e3ee2668" exitCode=0 Nov 29 04:45:44 crc kubenswrapper[4631]: I1129 04:45:44.673203 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" 
event={"ID":"f32f1eab-b255-4518-ba67-d9a46362d4f7","Type":"ContainerDied","Data":"1fda406e50ed14a660a6daf63d879b14037e3a0d9da4a68239f67569e3ee2668"} Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.073712 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.238574 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-ssh-key\") pod \"f32f1eab-b255-4518-ba67-d9a46362d4f7\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.239006 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-inventory\") pod \"f32f1eab-b255-4518-ba67-d9a46362d4f7\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.239099 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p92ql\" (UniqueName: \"kubernetes.io/projected/f32f1eab-b255-4518-ba67-d9a46362d4f7-kube-api-access-p92ql\") pod \"f32f1eab-b255-4518-ba67-d9a46362d4f7\" (UID: \"f32f1eab-b255-4518-ba67-d9a46362d4f7\") " Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.246037 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f32f1eab-b255-4518-ba67-d9a46362d4f7-kube-api-access-p92ql" (OuterVolumeSpecName: "kube-api-access-p92ql") pod "f32f1eab-b255-4518-ba67-d9a46362d4f7" (UID: "f32f1eab-b255-4518-ba67-d9a46362d4f7"). InnerVolumeSpecName "kube-api-access-p92ql". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.276831 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-inventory" (OuterVolumeSpecName: "inventory") pod "f32f1eab-b255-4518-ba67-d9a46362d4f7" (UID: "f32f1eab-b255-4518-ba67-d9a46362d4f7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.284835 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f32f1eab-b255-4518-ba67-d9a46362d4f7" (UID: "f32f1eab-b255-4518-ba67-d9a46362d4f7"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.341213 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.341428 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p92ql\" (UniqueName: \"kubernetes.io/projected/f32f1eab-b255-4518-ba67-d9a46362d4f7-kube-api-access-p92ql\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.341506 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f32f1eab-b255-4518-ba67-d9a46362d4f7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.660498 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.660927 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.694442 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" event={"ID":"f32f1eab-b255-4518-ba67-d9a46362d4f7","Type":"ContainerDied","Data":"cbc0d0db4eebb788cfd8073a617d4eda676a555bcc4a7d3cbad81508ba6033bd"} Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.694484 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbc0d0db4eebb788cfd8073a617d4eda676a555bcc4a7d3cbad81508ba6033bd" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.694532 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gcswr" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.742620 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.826714 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.836374 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86"] Nov 29 04:45:46 crc kubenswrapper[4631]: E1129 04:45:46.836829 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f32f1eab-b255-4518-ba67-d9a46362d4f7" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.836849 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="f32f1eab-b255-4518-ba67-d9a46362d4f7" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.837056 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="f32f1eab-b255-4518-ba67-d9a46362d4f7" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.837957 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.841244 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.841444 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.841590 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.841768 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.864113 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86"] Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.873176 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.873288 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.873443 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vb4kq\" (UniqueName: \"kubernetes.io/projected/6b0158a9-97f1-46c7-a984-b1e6876d4b57-kube-api-access-vb4kq\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.975043 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.975133 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.975237 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vb4kq\" (UniqueName: \"kubernetes.io/projected/6b0158a9-97f1-46c7-a984-b1e6876d4b57-kube-api-access-vb4kq\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86\" (UID: 
\"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.989957 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:46 crc kubenswrapper[4631]: I1129 04:45:46.990926 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:47 crc kubenswrapper[4631]: I1129 04:45:47.005760 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vb4kq\" (UniqueName: \"kubernetes.io/projected/6b0158a9-97f1-46c7-a984-b1e6876d4b57-kube-api-access-vb4kq\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:47 crc kubenswrapper[4631]: I1129 04:45:47.013374 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vf478"] Nov 29 04:45:47 crc kubenswrapper[4631]: I1129 04:45:47.158298 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:45:47 crc kubenswrapper[4631]: I1129 04:45:47.680196 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86"] Nov 29 04:45:47 crc kubenswrapper[4631]: I1129 04:45:47.707566 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" event={"ID":"6b0158a9-97f1-46c7-a984-b1e6876d4b57","Type":"ContainerStarted","Data":"d5642a8dfcf47e37b73d25d5dadfe2ee1d50be096b009e20c396bfb706f79136"} Nov 29 04:45:48 crc kubenswrapper[4631]: I1129 04:45:48.716842 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" event={"ID":"6b0158a9-97f1-46c7-a984-b1e6876d4b57","Type":"ContainerStarted","Data":"00d9bd4687d43e49488a2b19fceee0350803048a919d1a6ab42b2fa653873c85"} Nov 29 04:45:48 crc kubenswrapper[4631]: I1129 04:45:48.717003 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vf478" podUID="e24caa99-9dd8-4573-8001-19c92fff6692" containerName="registry-server" containerID="cri-o://f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54" gracePeriod=2 Nov 29 04:45:48 crc kubenswrapper[4631]: I1129 04:45:48.759890 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" podStartSLOduration=2.079548224 podStartE2EDuration="2.759872768s" podCreationTimestamp="2025-11-29 04:45:46 +0000 UTC" firstStartedPulling="2025-11-29 04:45:47.690630026 +0000 UTC m=+2074.755133580" lastFinishedPulling="2025-11-29 04:45:48.37095461 +0000 UTC m=+2075.435458124" observedRunningTime="2025-11-29 04:45:48.756186388 +0000 UTC m=+2075.820689912" 
watchObservedRunningTime="2025-11-29 04:45:48.759872768 +0000 UTC m=+2075.824376282" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.212210 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.336784 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-utilities\") pod \"e24caa99-9dd8-4573-8001-19c92fff6692\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.338032 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-utilities" (OuterVolumeSpecName: "utilities") pod "e24caa99-9dd8-4573-8001-19c92fff6692" (UID: "e24caa99-9dd8-4573-8001-19c92fff6692"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.338256 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-catalog-content\") pod \"e24caa99-9dd8-4573-8001-19c92fff6692\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.338319 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wr4hq\" (UniqueName: \"kubernetes.io/projected/e24caa99-9dd8-4573-8001-19c92fff6692-kube-api-access-wr4hq\") pod \"e24caa99-9dd8-4573-8001-19c92fff6692\" (UID: \"e24caa99-9dd8-4573-8001-19c92fff6692\") " Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.340079 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.343966 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e24caa99-9dd8-4573-8001-19c92fff6692-kube-api-access-wr4hq" (OuterVolumeSpecName: "kube-api-access-wr4hq") pod "e24caa99-9dd8-4573-8001-19c92fff6692" (UID: "e24caa99-9dd8-4573-8001-19c92fff6692"). InnerVolumeSpecName "kube-api-access-wr4hq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.401457 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e24caa99-9dd8-4573-8001-19c92fff6692" (UID: "e24caa99-9dd8-4573-8001-19c92fff6692"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.441245 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24caa99-9dd8-4573-8001-19c92fff6692-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.441277 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wr4hq\" (UniqueName: \"kubernetes.io/projected/e24caa99-9dd8-4573-8001-19c92fff6692-kube-api-access-wr4hq\") on node \"crc\" DevicePath \"\"" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.745780 4631 generic.go:334] "Generic (PLEG): container finished" podID="e24caa99-9dd8-4573-8001-19c92fff6692" containerID="f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54" exitCode=0 Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.745883 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vf478" event={"ID":"e24caa99-9dd8-4573-8001-19c92fff6692","Type":"ContainerDied","Data":"f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54"} Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.747610 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vf478" event={"ID":"e24caa99-9dd8-4573-8001-19c92fff6692","Type":"ContainerDied","Data":"07054b0cb2b61d9f218cb695502d375e2ddc49de850db64060a178ab2edf91f7"} Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.745901 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vf478" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.747667 4631 scope.go:117] "RemoveContainer" containerID="f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.787023 4631 scope.go:117] "RemoveContainer" containerID="485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.797085 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vf478"] Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.808772 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vf478"] Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.829589 4631 scope.go:117] "RemoveContainer" containerID="e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.871022 4631 scope.go:117] "RemoveContainer" containerID="f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54" Nov 29 04:45:49 crc kubenswrapper[4631]: E1129 04:45:49.872074 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54\": container with ID starting with f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54 not found: ID does not exist" containerID="f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.872106 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54"} err="failed to get container status 
\"f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54\": rpc error: code = NotFound desc = could not find container \"f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54\": container with ID starting with f38751a04f1faa3770c60fed97b311f6f319d62b632c0bfbd6ea92aedddecc54 not found: ID does not exist" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.872125 4631 scope.go:117] "RemoveContainer" containerID="485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31" Nov 29 04:45:49 crc kubenswrapper[4631]: E1129 04:45:49.872518 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31\": container with ID starting with 485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31 not found: ID does not exist" containerID="485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.872534 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31"} err="failed to get container status \"485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31\": rpc error: code = NotFound desc = could not find container \"485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31\": container with ID starting with 485fa4a485cc4491ae4246ef82fa03e08f60dc53ecdd38fad1ae00f961370a31 not found: ID does not exist" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.872546 4631 scope.go:117] "RemoveContainer" containerID="e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565" Nov 29 04:45:49 crc kubenswrapper[4631]: E1129 04:45:49.873449 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565\": container with ID starting with e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565 not found: ID does not exist" containerID="e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565" Nov 29 04:45:49 crc kubenswrapper[4631]: I1129 04:45:49.873479 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565"} err="failed to get container status \"e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565\": rpc error: code = NotFound desc = could not find container \"e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565\": container with ID starting with e4317834f7f01f05284b9f3a0016eef6f88c673b5f6334341c8eaf281c6e8565 not found: ID does not exist" Nov 29 04:45:51 crc kubenswrapper[4631]: I1129 04:45:51.227517 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e24caa99-9dd8-4573-8001-19c92fff6692" path="/var/lib/kubelet/pods/e24caa99-9dd8-4573-8001-19c92fff6692/volumes" Nov 29 04:45:58 crc kubenswrapper[4631]: I1129 04:45:58.849208 4631 generic.go:334] "Generic (PLEG): container finished" podID="6b0158a9-97f1-46c7-a984-b1e6876d4b57" containerID="00d9bd4687d43e49488a2b19fceee0350803048a919d1a6ab42b2fa653873c85" exitCode=0 Nov 29 04:45:58 crc kubenswrapper[4631]: I1129 04:45:58.849856 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" 
event={"ID":"6b0158a9-97f1-46c7-a984-b1e6876d4b57","Type":"ContainerDied","Data":"00d9bd4687d43e49488a2b19fceee0350803048a919d1a6ab42b2fa653873c85"} Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.332913 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.383175 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-ssh-key\") pod \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.383279 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-inventory\") pod \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.383311 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vb4kq\" (UniqueName: \"kubernetes.io/projected/6b0158a9-97f1-46c7-a984-b1e6876d4b57-kube-api-access-vb4kq\") pod \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\" (UID: \"6b0158a9-97f1-46c7-a984-b1e6876d4b57\") " Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.388289 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b0158a9-97f1-46c7-a984-b1e6876d4b57-kube-api-access-vb4kq" (OuterVolumeSpecName: "kube-api-access-vb4kq") pod "6b0158a9-97f1-46c7-a984-b1e6876d4b57" (UID: "6b0158a9-97f1-46c7-a984-b1e6876d4b57"). InnerVolumeSpecName "kube-api-access-vb4kq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.409709 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6b0158a9-97f1-46c7-a984-b1e6876d4b57" (UID: "6b0158a9-97f1-46c7-a984-b1e6876d4b57"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.413513 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-inventory" (OuterVolumeSpecName: "inventory") pod "6b0158a9-97f1-46c7-a984-b1e6876d4b57" (UID: "6b0158a9-97f1-46c7-a984-b1e6876d4b57"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.485649 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.485681 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b0158a9-97f1-46c7-a984-b1e6876d4b57-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.485690 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vb4kq\" (UniqueName: \"kubernetes.io/projected/6b0158a9-97f1-46c7-a984-b1e6876d4b57-kube-api-access-vb4kq\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.873898 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" event={"ID":"6b0158a9-97f1-46c7-a984-b1e6876d4b57","Type":"ContainerDied","Data":"d5642a8dfcf47e37b73d25d5dadfe2ee1d50be096b009e20c396bfb706f79136"} Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.874290 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5642a8dfcf47e37b73d25d5dadfe2ee1d50be096b009e20c396bfb706f79136" Nov 29 04:46:00 crc kubenswrapper[4631]: I1129 04:46:00.874146 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.027383 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2"] Nov 29 04:46:01 crc kubenswrapper[4631]: E1129 04:46:01.027792 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e24caa99-9dd8-4573-8001-19c92fff6692" containerName="extract-content" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.027808 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="e24caa99-9dd8-4573-8001-19c92fff6692" containerName="extract-content" Nov 29 04:46:01 crc kubenswrapper[4631]: E1129 04:46:01.027832 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e24caa99-9dd8-4573-8001-19c92fff6692" containerName="extract-utilities" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.027839 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="e24caa99-9dd8-4573-8001-19c92fff6692" containerName="extract-utilities" Nov 29 04:46:01 crc kubenswrapper[4631]: E1129 04:46:01.027853 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b0158a9-97f1-46c7-a984-b1e6876d4b57" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.027861 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b0158a9-97f1-46c7-a984-b1e6876d4b57" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:46:01 crc kubenswrapper[4631]: E1129 04:46:01.027873 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e24caa99-9dd8-4573-8001-19c92fff6692" containerName="registry-server" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.027879 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="e24caa99-9dd8-4573-8001-19c92fff6692" containerName="registry-server" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.028066 4631 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="6b0158a9-97f1-46c7-a984-b1e6876d4b57" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.028091 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="e24caa99-9dd8-4573-8001-19c92fff6692" containerName="registry-server" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.028712 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.033237 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.033462 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.033579 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.037924 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.038267 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.038520 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.038699 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.042209 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.072571 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2"] Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106412 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106459 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106502 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106523 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106549 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106567 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106583 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106696 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgv5z\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-kube-api-access-fgv5z\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106896 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.106990 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.107122 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.107175 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.107204 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.107273 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209167 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209233 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209272 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209320 4631 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-fgv5z\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-kube-api-access-fgv5z\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209484 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209546 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209623 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209661 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209700 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209750 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209827 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209877 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209939 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.209971 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.214782 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.217049 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.217082 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.217936 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.218291 4631 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.221603 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.221731 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.226325 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.230396 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.230679 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.234566 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.238424 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.239897 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.241037 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgv5z\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-kube-api-access-fgv5z\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.371416 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:01 crc kubenswrapper[4631]: I1129 04:46:01.954678 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2"] Nov 29 04:46:02 crc kubenswrapper[4631]: I1129 04:46:02.893715 4631 scope.go:117] "RemoveContainer" containerID="69dc560d11ca0b45c0f2bcbea2b6c8f2a41940a43fb4bd5cfbc5ddea9af4bb14" Nov 29 04:46:02 crc kubenswrapper[4631]: I1129 04:46:02.901307 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" event={"ID":"d39ef35e-1420-489c-9637-c89eb39ba398","Type":"ContainerStarted","Data":"f8a5041caefd22820dae4293e744701a375fba45c3330afe60c9711f1272f66b"} Nov 29 04:46:02 crc kubenswrapper[4631]: I1129 04:46:02.901458 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" event={"ID":"d39ef35e-1420-489c-9637-c89eb39ba398","Type":"ContainerStarted","Data":"6a868292905b72831efefe8d49ab376a669ac335c77b0d72aad53230f157c6f5"} Nov 29 04:46:02 crc kubenswrapper[4631]: I1129 04:46:02.953566 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" podStartSLOduration=2.498224429 podStartE2EDuration="2.953540738s" podCreationTimestamp="2025-11-29 04:46:00 +0000 UTC" firstStartedPulling="2025-11-29 04:46:01.957481132 +0000 UTC m=+2089.021984656" lastFinishedPulling="2025-11-29 04:46:02.412797411 +0000 UTC m=+2089.477300965" observedRunningTime="2025-11-29 04:46:02.933139527 +0000 UTC m=+2089.997643101" watchObservedRunningTime="2025-11-29 04:46:02.953540738 +0000 UTC m=+2090.018044292" Nov 29 04:46:47 crc kubenswrapper[4631]: I1129 04:46:47.373067 4631 generic.go:334] "Generic (PLEG): container finished" podID="d39ef35e-1420-489c-9637-c89eb39ba398" containerID="f8a5041caefd22820dae4293e744701a375fba45c3330afe60c9711f1272f66b" exitCode=0 Nov 29 04:46:47 crc kubenswrapper[4631]: I1129 04:46:47.373145 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" event={"ID":"d39ef35e-1420-489c-9637-c89eb39ba398","Type":"ContainerDied","Data":"f8a5041caefd22820dae4293e744701a375fba45c3330afe60c9711f1272f66b"} Nov 29 04:46:49 crc 
kubenswrapper[4631]: I1129 04:46:49.628529 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.735474 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-bootstrap-combined-ca-bundle\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.735774 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-neutron-metadata-combined-ca-bundle\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.735817 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-inventory\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.735850 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-nova-combined-ca-bundle\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.735870 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.735928 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-libvirt-combined-ca-bundle\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.736042 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ovn-combined-ca-bundle\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.736069 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.736093 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgv5z\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-kube-api-access-fgv5z\") pod 
\"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.736114 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-ovn-default-certs-0\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.736145 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-repo-setup-combined-ca-bundle\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.736165 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-telemetry-combined-ca-bundle\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.736189 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ssh-key\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.736214 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"d39ef35e-1420-489c-9637-c89eb39ba398\" (UID: \"d39ef35e-1420-489c-9637-c89eb39ba398\") " Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.743045 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.743823 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.743877 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.745617 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.745753 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.745956 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.748097 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.749075 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.751097 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.751117 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.751691 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-kube-api-access-fgv5z" (OuterVolumeSpecName: "kube-api-access-fgv5z") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "kube-api-access-fgv5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.762034 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.771534 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-inventory" (OuterVolumeSpecName: "inventory") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.772838 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d39ef35e-1420-489c-9637-c89eb39ba398" (UID: "d39ef35e-1420-489c-9637-c89eb39ba398"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840058 4631 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840125 4631 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840142 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgv5z\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-kube-api-access-fgv5z\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840155 4631 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840168 4631 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840226 4631 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840242 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840276 4631 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840292 4631 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840305 4631 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840318 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840363 4631 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840377 4631 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d39ef35e-1420-489c-9637-c89eb39ba398-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:49 crc kubenswrapper[4631]: I1129 04:46:49.840392 4631 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39ef35e-1420-489c-9637-c89eb39ba398-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.177042 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" event={"ID":"d39ef35e-1420-489c-9637-c89eb39ba398","Type":"ContainerDied","Data":"6a868292905b72831efefe8d49ab376a669ac335c77b0d72aad53230f157c6f5"} Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.177105 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a868292905b72831efefe8d49ab376a669ac335c77b0d72aad53230f157c6f5" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.177113 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.716198 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.716274 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.771341 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v"] Nov 29 04:46:50 crc kubenswrapper[4631]: E1129 04:46:50.771807 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d39ef35e-1420-489c-9637-c89eb39ba398" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.771829 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="d39ef35e-1420-489c-9637-c89eb39ba398" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.772041 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="d39ef35e-1420-489c-9637-c89eb39ba398" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.772756 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.777045 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.777294 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.777299 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.777388 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.777407 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.796573 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v"] Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.859687 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.859934 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99dmf\" (UniqueName: \"kubernetes.io/projected/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-kube-api-access-99dmf\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.860008 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.860127 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.860257 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.961878 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.961961 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.962029 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.962613 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.962641 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99dmf\" (UniqueName: \"kubernetes.io/projected/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-kube-api-access-99dmf\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.962887 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.966436 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.970543 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.978234 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") 
" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:50 crc kubenswrapper[4631]: I1129 04:46:50.983903 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99dmf\" (UniqueName: \"kubernetes.io/projected/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-kube-api-access-99dmf\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-msq8v\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:51 crc kubenswrapper[4631]: I1129 04:46:51.094600 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:46:51 crc kubenswrapper[4631]: I1129 04:46:51.624078 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v"] Nov 29 04:46:52 crc kubenswrapper[4631]: I1129 04:46:52.199796 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" event={"ID":"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d","Type":"ContainerStarted","Data":"aded0a01454257b46e07cd3e0b076dd8b307d65f9ac5091ef2be1ec57fdec3dd"} Nov 29 04:46:53 crc kubenswrapper[4631]: I1129 04:46:53.214944 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" event={"ID":"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d","Type":"ContainerStarted","Data":"5bf28edf29266c8864a078cfd895224cc10f7d47140a3573b38e977fc7c64d06"} Nov 29 04:46:53 crc kubenswrapper[4631]: I1129 04:46:53.258489 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" podStartSLOduration=2.760952243 podStartE2EDuration="3.258468925s" podCreationTimestamp="2025-11-29 04:46:50 +0000 UTC" firstStartedPulling="2025-11-29 04:46:51.630889258 +0000 UTC m=+2138.695392772" lastFinishedPulling="2025-11-29 04:46:52.12840594 +0000 UTC m=+2139.192909454" observedRunningTime="2025-11-29 04:46:53.237313955 +0000 UTC m=+2140.301817499" watchObservedRunningTime="2025-11-29 04:46:53.258468925 +0000 UTC m=+2140.322972449" Nov 29 04:47:20 crc kubenswrapper[4631]: I1129 04:47:20.716315 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:47:20 crc kubenswrapper[4631]: I1129 04:47:20.716856 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.244981 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k775b"] Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.247820 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.270398 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k775b"] Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.285616 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-catalog-content\") pod \"certified-operators-k775b\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.285907 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9slcr\" (UniqueName: \"kubernetes.io/projected/87d660d6-6ecf-4ac5-8afa-a47a01723692-kube-api-access-9slcr\") pod \"certified-operators-k775b\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.285962 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-utilities\") pod \"certified-operators-k775b\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.388304 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-catalog-content\") pod \"certified-operators-k775b\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.388419 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9slcr\" (UniqueName: \"kubernetes.io/projected/87d660d6-6ecf-4ac5-8afa-a47a01723692-kube-api-access-9slcr\") pod \"certified-operators-k775b\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.388438 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-utilities\") pod \"certified-operators-k775b\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.389013 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-utilities\") pod \"certified-operators-k775b\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.389302 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-catalog-content\") pod \"certified-operators-k775b\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.414066 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9slcr\" (UniqueName: \"kubernetes.io/projected/87d660d6-6ecf-4ac5-8afa-a47a01723692-kube-api-access-9slcr\") pod \"certified-operators-k775b\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:39 crc kubenswrapper[4631]: I1129 04:47:39.585814 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:40 crc kubenswrapper[4631]: I1129 04:47:40.123726 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k775b"] Nov 29 04:47:40 crc kubenswrapper[4631]: I1129 04:47:40.669218 4631 generic.go:334] "Generic (PLEG): container finished" podID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerID="14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd" exitCode=0 Nov 29 04:47:40 crc kubenswrapper[4631]: I1129 04:47:40.669300 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k775b" event={"ID":"87d660d6-6ecf-4ac5-8afa-a47a01723692","Type":"ContainerDied","Data":"14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd"} Nov 29 04:47:40 crc kubenswrapper[4631]: I1129 04:47:40.669553 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k775b" event={"ID":"87d660d6-6ecf-4ac5-8afa-a47a01723692","Type":"ContainerStarted","Data":"beccd3ecf201acda00f65ec2c5d22f5a6351849731bcc9d0b08cf0acc50ee0ba"} Nov 29 04:47:42 crc kubenswrapper[4631]: I1129 04:47:42.690406 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k775b" event={"ID":"87d660d6-6ecf-4ac5-8afa-a47a01723692","Type":"ContainerStarted","Data":"40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3"} Nov 29 04:47:43 crc kubenswrapper[4631]: I1129 04:47:43.703321 4631 generic.go:334] "Generic (PLEG): container finished" podID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerID="40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3" exitCode=0 Nov 29 04:47:43 crc kubenswrapper[4631]: I1129 04:47:43.703399 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k775b" event={"ID":"87d660d6-6ecf-4ac5-8afa-a47a01723692","Type":"ContainerDied","Data":"40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3"} Nov 29 04:47:44 crc kubenswrapper[4631]: I1129 04:47:44.731323 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k775b" event={"ID":"87d660d6-6ecf-4ac5-8afa-a47a01723692","Type":"ContainerStarted","Data":"025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99"} Nov 29 04:47:44 crc kubenswrapper[4631]: I1129 04:47:44.759668 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k775b" podStartSLOduration=2.179250683 podStartE2EDuration="5.759648594s" podCreationTimestamp="2025-11-29 04:47:39 +0000 UTC" firstStartedPulling="2025-11-29 04:47:40.672063623 +0000 UTC m=+2187.736567147" lastFinishedPulling="2025-11-29 04:47:44.252461544 +0000 UTC m=+2191.316965058" observedRunningTime="2025-11-29 04:47:44.753934863 +0000 UTC m=+2191.818438397" watchObservedRunningTime="2025-11-29 04:47:44.759648594 +0000 UTC m=+2191.824152108" Nov 29 04:47:49 crc kubenswrapper[4631]: I1129 04:47:49.587211 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:49 crc kubenswrapper[4631]: I1129 04:47:49.587871 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:49 crc kubenswrapper[4631]: I1129 04:47:49.647157 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:49 crc kubenswrapper[4631]: I1129 04:47:49.874623 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:49 crc kubenswrapper[4631]: I1129 04:47:49.930487 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k775b"] Nov 29 04:47:50 crc kubenswrapper[4631]: I1129 04:47:50.716416 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:47:50 crc kubenswrapper[4631]: I1129 04:47:50.716722 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:47:50 crc kubenswrapper[4631]: I1129 04:47:50.716776 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:47:50 crc kubenswrapper[4631]: I1129 04:47:50.717537 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"91c1e60f055fd84296684a0bad54cde6b8f4cb334611b9aec94a6b3673703fab"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 04:47:50 crc kubenswrapper[4631]: I1129 04:47:50.717591 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://91c1e60f055fd84296684a0bad54cde6b8f4cb334611b9aec94a6b3673703fab" gracePeriod=600 Nov 29 04:47:51 crc kubenswrapper[4631]: I1129 04:47:51.831318 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="91c1e60f055fd84296684a0bad54cde6b8f4cb334611b9aec94a6b3673703fab" exitCode=0 Nov 29 04:47:51 crc kubenswrapper[4631]: I1129 04:47:51.831385 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"91c1e60f055fd84296684a0bad54cde6b8f4cb334611b9aec94a6b3673703fab"} Nov 29 04:47:51 crc kubenswrapper[4631]: I1129 04:47:51.831979 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"} Nov 29 04:47:51 crc 
kubenswrapper[4631]: I1129 04:47:51.832013 4631 scope.go:117] "RemoveContainer" containerID="f094e233cd6504a7e5d53b221bdf3c7ac87a77ca769fb16642a5f57a758b26a2" Nov 29 04:47:51 crc kubenswrapper[4631]: I1129 04:47:51.832122 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k775b" podUID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerName="registry-server" containerID="cri-o://025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99" gracePeriod=2 Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.413694 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.560440 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9slcr\" (UniqueName: \"kubernetes.io/projected/87d660d6-6ecf-4ac5-8afa-a47a01723692-kube-api-access-9slcr\") pod \"87d660d6-6ecf-4ac5-8afa-a47a01723692\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.560477 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-utilities\") pod \"87d660d6-6ecf-4ac5-8afa-a47a01723692\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.560663 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-catalog-content\") pod \"87d660d6-6ecf-4ac5-8afa-a47a01723692\" (UID: \"87d660d6-6ecf-4ac5-8afa-a47a01723692\") " Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.561729 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-utilities" (OuterVolumeSpecName: "utilities") pod "87d660d6-6ecf-4ac5-8afa-a47a01723692" (UID: "87d660d6-6ecf-4ac5-8afa-a47a01723692"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.580840 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87d660d6-6ecf-4ac5-8afa-a47a01723692-kube-api-access-9slcr" (OuterVolumeSpecName: "kube-api-access-9slcr") pod "87d660d6-6ecf-4ac5-8afa-a47a01723692" (UID: "87d660d6-6ecf-4ac5-8afa-a47a01723692"). InnerVolumeSpecName "kube-api-access-9slcr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.606086 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87d660d6-6ecf-4ac5-8afa-a47a01723692" (UID: "87d660d6-6ecf-4ac5-8afa-a47a01723692"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.662200 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9slcr\" (UniqueName: \"kubernetes.io/projected/87d660d6-6ecf-4ac5-8afa-a47a01723692-kube-api-access-9slcr\") on node \"crc\" DevicePath \"\"" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.663443 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.663736 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87d660d6-6ecf-4ac5-8afa-a47a01723692-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.844093 4631 generic.go:334] "Generic (PLEG): container finished" podID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerID="025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99" exitCode=0 Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.844132 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k775b" event={"ID":"87d660d6-6ecf-4ac5-8afa-a47a01723692","Type":"ContainerDied","Data":"025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99"} Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.844160 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k775b" event={"ID":"87d660d6-6ecf-4ac5-8afa-a47a01723692","Type":"ContainerDied","Data":"beccd3ecf201acda00f65ec2c5d22f5a6351849731bcc9d0b08cf0acc50ee0ba"} Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.844177 4631 scope.go:117] "RemoveContainer" containerID="025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.844276 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k775b" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.899087 4631 scope.go:117] "RemoveContainer" containerID="40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.907426 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k775b"] Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.926913 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k775b"] Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.927947 4631 scope.go:117] "RemoveContainer" containerID="14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.973904 4631 scope.go:117] "RemoveContainer" containerID="025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99" Nov 29 04:47:52 crc kubenswrapper[4631]: E1129 04:47:52.974432 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99\": container with ID starting with 025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99 not found: ID does not exist" containerID="025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.974529 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99"} err="failed to get container status \"025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99\": rpc error: code = NotFound desc = could not find container \"025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99\": container with ID starting with 025adcadef70ca82fa2a7edc44876d22d1ab178950f45c7dcd22a27af4d24a99 not found: ID does not exist" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.974605 4631 scope.go:117] "RemoveContainer" containerID="40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3" Nov 29 04:47:52 crc kubenswrapper[4631]: E1129 04:47:52.975045 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3\": container with ID starting with 40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3 not found: ID does not exist" containerID="40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.975134 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3"} err="failed to get container status \"40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3\": rpc error: code = NotFound desc = could not find container \"40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3\": container with ID starting with 40928ecbfe86d938eb12fe546a30c35f22ced31b8235fe867c86d6c5a61a31f3 not found: ID does not exist" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.975197 4631 scope.go:117] "RemoveContainer" containerID="14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd" Nov 29 04:47:52 crc kubenswrapper[4631]: E1129 04:47:52.975597 4631 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd\": container with ID starting with 14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd not found: ID does not exist" containerID="14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd" Nov 29 04:47:52 crc kubenswrapper[4631]: I1129 04:47:52.975659 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd"} err="failed to get container status \"14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd\": rpc error: code = NotFound desc = could not find container \"14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd\": container with ID starting with 14c84f81676bb6064aa20a4ea3125f6755dc3fe4d42c2e4182a73fd753b96edd not found: ID does not exist" Nov 29 04:47:53 crc kubenswrapper[4631]: I1129 04:47:53.229641 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87d660d6-6ecf-4ac5-8afa-a47a01723692" path="/var/lib/kubelet/pods/87d660d6-6ecf-4ac5-8afa-a47a01723692/volumes" Nov 29 04:48:05 crc kubenswrapper[4631]: I1129 04:48:05.986612 4631 generic.go:334] "Generic (PLEG): container finished" podID="1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d" containerID="5bf28edf29266c8864a078cfd895224cc10f7d47140a3573b38e977fc7c64d06" exitCode=0 Nov 29 04:48:05 crc kubenswrapper[4631]: I1129 04:48:05.986913 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" event={"ID":"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d","Type":"ContainerDied","Data":"5bf28edf29266c8864a078cfd895224cc10f7d47140a3573b38e977fc7c64d06"} Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.656984 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.722185 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99dmf\" (UniqueName: \"kubernetes.io/projected/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-kube-api-access-99dmf\") pod \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.722246 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-inventory\") pod \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.722313 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovncontroller-config-0\") pod \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.722348 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovn-combined-ca-bundle\") pod \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.722408 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ssh-key\") pod \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\" (UID: \"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d\") " Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.729688 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-kube-api-access-99dmf" (OuterVolumeSpecName: "kube-api-access-99dmf") pod "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d" (UID: "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d"). InnerVolumeSpecName "kube-api-access-99dmf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.732494 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d" (UID: "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.746412 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d" (UID: "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.759580 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-inventory" (OuterVolumeSpecName: "inventory") pod "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d" (UID: "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.762344 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d" (UID: "1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.823949 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99dmf\" (UniqueName: \"kubernetes.io/projected/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-kube-api-access-99dmf\") on node \"crc\" DevicePath \"\"" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.823981 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.823991 4631 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.824003 4631 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:48:07 crc kubenswrapper[4631]: I1129 04:48:07.824013 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.007427 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" event={"ID":"1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d","Type":"ContainerDied","Data":"aded0a01454257b46e07cd3e0b076dd8b307d65f9ac5091ef2be1ec57fdec3dd"} Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.007816 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aded0a01454257b46e07cd3e0b076dd8b307d65f9ac5091ef2be1ec57fdec3dd" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.007477 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-msq8v" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.286522 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8"] Nov 29 04:48:08 crc kubenswrapper[4631]: E1129 04:48:08.286919 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerName="extract-utilities" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.286940 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerName="extract-utilities" Nov 29 04:48:08 crc kubenswrapper[4631]: E1129 04:48:08.286969 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerName="registry-server" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.286978 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerName="registry-server" Nov 29 04:48:08 crc kubenswrapper[4631]: E1129 04:48:08.287008 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.287016 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 29 04:48:08 crc kubenswrapper[4631]: E1129 04:48:08.287032 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerName="extract-content" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.287040 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerName="extract-content" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.287244 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="87d660d6-6ecf-4ac5-8afa-a47a01723692" containerName="registry-server" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.287262 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.287980 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.290768 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.291589 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.291770 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.291960 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.296422 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.296791 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.306069 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8"] Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.333369 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.333510 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.333544 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.333579 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.333613 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.333663 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckwls\" (UniqueName: \"kubernetes.io/projected/62539716-e710-4274-a860-22590e2d5861-kube-api-access-ckwls\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.435202 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.435252 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.435284 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.435325 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckwls\" (UniqueName: \"kubernetes.io/projected/62539716-e710-4274-a860-22590e2d5861-kube-api-access-ckwls\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.435382 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.435474 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.440817 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.444684 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.445291 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.445396 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.455121 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.456949 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckwls\" (UniqueName: \"kubernetes.io/projected/62539716-e710-4274-a860-22590e2d5861-kube-api-access-ckwls\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:08 crc kubenswrapper[4631]: I1129 04:48:08.608278 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" Nov 29 04:48:09 crc kubenswrapper[4631]: I1129 04:48:09.191691 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8"] Nov 29 04:48:09 crc kubenswrapper[4631]: W1129 04:48:09.194639 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62539716_e710_4274_a860_22590e2d5861.slice/crio-99eeb43bf79fd294b7491be8b0341a5fdc1926bb77b8961f93cd8d136d7ede15 WatchSource:0}: Error finding container 99eeb43bf79fd294b7491be8b0341a5fdc1926bb77b8961f93cd8d136d7ede15: Status 404 returned error can't find the container with id 99eeb43bf79fd294b7491be8b0341a5fdc1926bb77b8961f93cd8d136d7ede15 Nov 29 04:48:10 crc kubenswrapper[4631]: I1129 04:48:10.023715 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" event={"ID":"62539716-e710-4274-a860-22590e2d5861","Type":"ContainerStarted","Data":"99eeb43bf79fd294b7491be8b0341a5fdc1926bb77b8961f93cd8d136d7ede15"} Nov 29 04:48:11 crc kubenswrapper[4631]: I1129 04:48:11.034299 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" event={"ID":"62539716-e710-4274-a860-22590e2d5861","Type":"ContainerStarted","Data":"e8900a1281ca9abc012a4a3c3b97f484ccec4881dde03da0a696a6544990fe23"} Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.680049 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" podStartSLOduration=12.088942236 podStartE2EDuration="12.680029069s" podCreationTimestamp="2025-11-29 04:48:08 +0000 UTC" firstStartedPulling="2025-11-29 04:48:09.198135766 +0000 UTC m=+2216.262639280" lastFinishedPulling="2025-11-29 04:48:09.789222599 +0000 UTC m=+2216.853726113" observedRunningTime="2025-11-29 04:48:11.062850744 +0000 UTC m=+2218.127354268" watchObservedRunningTime="2025-11-29 04:48:20.680029069 +0000 UTC m=+2227.744532583" Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.685900 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d6qs7"] Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.688146 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.698045 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d6qs7"]
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.821499 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-utilities\") pod \"redhat-marketplace-d6qs7\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") " pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.821720 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-catalog-content\") pod \"redhat-marketplace-d6qs7\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") " pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.822152 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2sd6\" (UniqueName: \"kubernetes.io/projected/0828cad2-e5d0-48d3-bba0-3af3728e4901-kube-api-access-t2sd6\") pod \"redhat-marketplace-d6qs7\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") " pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.923435 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-utilities\") pod \"redhat-marketplace-d6qs7\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") " pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.923481 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-catalog-content\") pod \"redhat-marketplace-d6qs7\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") " pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.923584 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2sd6\" (UniqueName: \"kubernetes.io/projected/0828cad2-e5d0-48d3-bba0-3af3728e4901-kube-api-access-t2sd6\") pod \"redhat-marketplace-d6qs7\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") " pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.924183 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-utilities\") pod \"redhat-marketplace-d6qs7\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") " pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.924235 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-catalog-content\") pod \"redhat-marketplace-d6qs7\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") " pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:20 crc kubenswrapper[4631]: I1129 04:48:20.970045 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2sd6\" (UniqueName: \"kubernetes.io/projected/0828cad2-e5d0-48d3-bba0-3af3728e4901-kube-api-access-t2sd6\") pod \"redhat-marketplace-d6qs7\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") " pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:21 crc kubenswrapper[4631]: I1129 04:48:21.007762 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:21 crc kubenswrapper[4631]: I1129 04:48:21.523237 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d6qs7"]
Nov 29 04:48:22 crc kubenswrapper[4631]: I1129 04:48:22.142079 4631 generic.go:334] "Generic (PLEG): container finished" podID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerID="6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3" exitCode=0
Nov 29 04:48:22 crc kubenswrapper[4631]: I1129 04:48:22.142152 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d6qs7" event={"ID":"0828cad2-e5d0-48d3-bba0-3af3728e4901","Type":"ContainerDied","Data":"6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3"}
Nov 29 04:48:22 crc kubenswrapper[4631]: I1129 04:48:22.142470 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d6qs7" event={"ID":"0828cad2-e5d0-48d3-bba0-3af3728e4901","Type":"ContainerStarted","Data":"69be28f487d42aa226deb4e819a7a6cca85982630e4cbd6b3f1f0da204948ca3"}
Nov 29 04:48:24 crc kubenswrapper[4631]: I1129 04:48:24.186264 4631 generic.go:334] "Generic (PLEG): container finished" podID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerID="278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d" exitCode=0
Nov 29 04:48:24 crc kubenswrapper[4631]: I1129 04:48:24.186884 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d6qs7" event={"ID":"0828cad2-e5d0-48d3-bba0-3af3728e4901","Type":"ContainerDied","Data":"278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d"}
Nov 29 04:48:25 crc kubenswrapper[4631]: I1129 04:48:25.203766 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d6qs7" event={"ID":"0828cad2-e5d0-48d3-bba0-3af3728e4901","Type":"ContainerStarted","Data":"3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7"}
Nov 29 04:48:25 crc kubenswrapper[4631]: I1129 04:48:25.229173 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d6qs7" podStartSLOduration=2.737596439 podStartE2EDuration="5.229153088s" podCreationTimestamp="2025-11-29 04:48:20 +0000 UTC" firstStartedPulling="2025-11-29 04:48:22.145599163 +0000 UTC m=+2229.210102697" lastFinishedPulling="2025-11-29 04:48:24.637155832 +0000 UTC m=+2231.701659346" observedRunningTime="2025-11-29 04:48:25.225364644 +0000 UTC m=+2232.289868159" watchObservedRunningTime="2025-11-29 04:48:25.229153088 +0000 UTC m=+2232.293656612"
Nov 29 04:48:31 crc kubenswrapper[4631]: I1129 04:48:31.008768 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:31 crc kubenswrapper[4631]: I1129 04:48:31.009225 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d6qs7"
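
---- editor's note ----
In the "Observed pod startup duration" entry above, podStartSLOduration is the end-to-end startup time with the image-pull window excluded. The monotonic offsets (the m=+... values) logged in that entry bear this out exactly:

    pull window = 2231.701659346 - 2229.210102697 = 2.491556649 s
    5.229153088 s (podStartE2EDuration) - 2.491556649 s = 2.737596439 s = podStartSLOduration

---- end editor's note ----
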
Nov 29 04:48:31 crc kubenswrapper[4631]: I1129 04:48:31.073377 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:31 crc kubenswrapper[4631]: I1129 04:48:31.333882 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:31 crc kubenswrapper[4631]: I1129 04:48:31.396263 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d6qs7"]
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.281274 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d6qs7" podUID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerName="registry-server" containerID="cri-o://3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7" gracePeriod=2
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.744284 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.876922 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-catalog-content\") pod \"0828cad2-e5d0-48d3-bba0-3af3728e4901\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") "
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.877218 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2sd6\" (UniqueName: \"kubernetes.io/projected/0828cad2-e5d0-48d3-bba0-3af3728e4901-kube-api-access-t2sd6\") pod \"0828cad2-e5d0-48d3-bba0-3af3728e4901\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") "
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.877285 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-utilities\") pod \"0828cad2-e5d0-48d3-bba0-3af3728e4901\" (UID: \"0828cad2-e5d0-48d3-bba0-3af3728e4901\") "
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.878307 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-utilities" (OuterVolumeSpecName: "utilities") pod "0828cad2-e5d0-48d3-bba0-3af3728e4901" (UID: "0828cad2-e5d0-48d3-bba0-3af3728e4901"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.888659 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0828cad2-e5d0-48d3-bba0-3af3728e4901-kube-api-access-t2sd6" (OuterVolumeSpecName: "kube-api-access-t2sd6") pod "0828cad2-e5d0-48d3-bba0-3af3728e4901" (UID: "0828cad2-e5d0-48d3-bba0-3af3728e4901"). InnerVolumeSpecName "kube-api-access-t2sd6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.895033 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0828cad2-e5d0-48d3-bba0-3af3728e4901" (UID: "0828cad2-e5d0-48d3-bba0-3af3728e4901"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.980193 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.980228 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2sd6\" (UniqueName: \"kubernetes.io/projected/0828cad2-e5d0-48d3-bba0-3af3728e4901-kube-api-access-t2sd6\") on node \"crc\" DevicePath \"\""
Nov 29 04:48:33 crc kubenswrapper[4631]: I1129 04:48:33.980242 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0828cad2-e5d0-48d3-bba0-3af3728e4901-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.295079 4631 generic.go:334] "Generic (PLEG): container finished" podID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerID="3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7" exitCode=0
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.295126 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d6qs7" event={"ID":"0828cad2-e5d0-48d3-bba0-3af3728e4901","Type":"ContainerDied","Data":"3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7"}
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.295156 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d6qs7" event={"ID":"0828cad2-e5d0-48d3-bba0-3af3728e4901","Type":"ContainerDied","Data":"69be28f487d42aa226deb4e819a7a6cca85982630e4cbd6b3f1f0da204948ca3"}
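
---- editor's note ----
The "Killing container with a grace period ... gracePeriod=2" entry above is the usual two-step termination: the runtime delivers SIGTERM, and if the container has not exited when the grace period lapses it is force-killed. A minimal Go sketch of that shape, assuming a locally spawned child process rather than the real CRI call path the kubelet actually uses (Unix-only because of syscall.SIGTERM):

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    // killWithGrace sends SIGTERM, waits up to grace for the process to
    // exit, then escalates to a hard kill, mirroring gracePeriod=2 above.
    func killWithGrace(cmd *exec.Cmd, grace time.Duration) error {
        if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
            return err
        }
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        select {
        case err := <-done:
            return err // exited within the grace period
        case <-time.After(grace):
            _ = cmd.Process.Kill() // grace expired: SIGKILL
            return <-done
        }
    }

    func main() {
        cmd := exec.Command("sleep", "30")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        fmt.Println(killWithGrace(cmd, 2*time.Second))
    }

---- end editor's note ----
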
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.295160 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d6qs7"
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.295187 4631 scope.go:117] "RemoveContainer" containerID="3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7"
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.338220 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d6qs7"]
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.350872 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d6qs7"]
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.360856 4631 scope.go:117] "RemoveContainer" containerID="278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d"
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.383062 4631 scope.go:117] "RemoveContainer" containerID="6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3"
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.423894 4631 scope.go:117] "RemoveContainer" containerID="3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7"
Nov 29 04:48:34 crc kubenswrapper[4631]: E1129 04:48:34.424501 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7\": container with ID starting with 3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7 not found: ID does not exist" containerID="3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7"
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.424551 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7"} err="failed to get container status \"3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7\": rpc error: code = NotFound desc = could not find container \"3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7\": container with ID starting with 3c479dfd3ad5cb901b5c21a5f5f61dc79af6249e54b06137f9568c80019220b7 not found: ID does not exist"
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.424582 4631 scope.go:117] "RemoveContainer" containerID="278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d"
Nov 29 04:48:34 crc kubenswrapper[4631]: E1129 04:48:34.425101 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d\": container with ID starting with 278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d not found: ID does not exist" containerID="278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d"
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.425142 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d"} err="failed to get container status \"278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d\": rpc error: code = NotFound desc = could not find container \"278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d\": container with ID starting with 278bda004e76ae3d3583722f5624ee763d4809a1c84eeebf9e4838081f8cd65d not found: ID does not exist"
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.425186 4631 scope.go:117] "RemoveContainer" containerID="6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3"
Nov 29 04:48:34 crc kubenswrapper[4631]: E1129 04:48:34.425555 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3\": container with ID starting with 6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3 not found: ID does not exist" containerID="6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3"
Nov 29 04:48:34 crc kubenswrapper[4631]: I1129 04:48:34.425584 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3"} err="failed to get container status \"6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3\": rpc error: code = NotFound desc = could not find container \"6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3\": container with ID starting with 6a1b567b1adf645296499d98d3fc63aaa2bbcbc5531eeb650626fb52dbb2c4b3 not found: ID does not exist"
Nov 29 04:48:35 crc kubenswrapper[4631]: I1129 04:48:35.228022 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0828cad2-e5d0-48d3-bba0-3af3728e4901" path="/var/lib/kubelet/pods/0828cad2-e5d0-48d3-bba0-3af3728e4901/volumes"
Nov 29 04:48:45 crc kubenswrapper[4631]: I1129 04:48:45.371785 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-76cbc8bc95-pd9d4" podUID="dfe18059-91e5-40e0-a1df-f5f56cf4c0d2" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502"
Nov 29 04:49:07 crc kubenswrapper[4631]: I1129 04:49:07.647801 4631 generic.go:334] "Generic (PLEG): container finished" podID="62539716-e710-4274-a860-22590e2d5861" containerID="e8900a1281ca9abc012a4a3c3b97f484ccec4881dde03da0a696a6544990fe23" exitCode=0
Nov 29 04:49:07 crc kubenswrapper[4631]: I1129 04:49:07.647945 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" event={"ID":"62539716-e710-4274-a860-22590e2d5861","Type":"ContainerDied","Data":"e8900a1281ca9abc012a4a3c3b97f484ccec4881dde03da0a696a6544990fe23"}
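
---- editor's note ----
The paired "ContainerStatus from runtime service failed ... NotFound" errors above are benign: by the time the kubelet re-queried cri-o, the containers were already gone, so the deletion had effectively succeeded. Deletion stays idempotent when NotFound is treated as success; a sketch of that handling over gRPC status codes (removeContainer below is a hypothetical stand-in, not the kubelet's API):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer is a hypothetical runtime call; here it always
    // reports NotFound, standing in for a container already deleted.
    func removeContainer(id string) error {
        return status.Errorf(codes.NotFound, "could not find container %q", id)
    }

    // remove treats NotFound as success so repeated deletes are no-ops,
    // matching the "DeleteContainer returned error ... not found" entries.
    func remove(id string) error {
        err := removeContainer(id)
        if status.Code(err) == codes.NotFound {
            return nil // already gone: nothing to do
        }
        return err
    }

    func main() {
        fmt.Println(remove("3c479dfd3ad5")) // prints <nil>
    }

---- end editor's note ----
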
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.200696 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.306183 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-ssh-key\") pod \"62539716-e710-4274-a860-22590e2d5861\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") "
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.306272 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-inventory\") pod \"62539716-e710-4274-a860-22590e2d5861\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") "
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.306298 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-metadata-combined-ca-bundle\") pod \"62539716-e710-4274-a860-22590e2d5861\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") "
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.306322 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-ovn-metadata-agent-neutron-config-0\") pod \"62539716-e710-4274-a860-22590e2d5861\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") "
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.307003 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckwls\" (UniqueName: \"kubernetes.io/projected/62539716-e710-4274-a860-22590e2d5861-kube-api-access-ckwls\") pod \"62539716-e710-4274-a860-22590e2d5861\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") "
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.307619 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-nova-metadata-neutron-config-0\") pod \"62539716-e710-4274-a860-22590e2d5861\" (UID: \"62539716-e710-4274-a860-22590e2d5861\") "
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.311745 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62539716-e710-4274-a860-22590e2d5861-kube-api-access-ckwls" (OuterVolumeSpecName: "kube-api-access-ckwls") pod "62539716-e710-4274-a860-22590e2d5861" (UID: "62539716-e710-4274-a860-22590e2d5861"). InnerVolumeSpecName "kube-api-access-ckwls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.313939 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "62539716-e710-4274-a860-22590e2d5861" (UID: "62539716-e710-4274-a860-22590e2d5861"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.335638 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "62539716-e710-4274-a860-22590e2d5861" (UID: "62539716-e710-4274-a860-22590e2d5861"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.339667 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-inventory" (OuterVolumeSpecName: "inventory") pod "62539716-e710-4274-a860-22590e2d5861" (UID: "62539716-e710-4274-a860-22590e2d5861"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.339886 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "62539716-e710-4274-a860-22590e2d5861" (UID: "62539716-e710-4274-a860-22590e2d5861"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.362474 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "62539716-e710-4274-a860-22590e2d5861" (UID: "62539716-e710-4274-a860-22590e2d5861"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.410767 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckwls\" (UniqueName: \"kubernetes.io/projected/62539716-e710-4274-a860-22590e2d5861-kube-api-access-ckwls\") on node \"crc\" DevicePath \"\""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.410815 4631 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.410834 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.410852 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-inventory\") on node \"crc\" DevicePath \"\""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.410869 4631 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.410889 4631 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/62539716-e710-4274-a860-22590e2d5861-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\""
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.673644 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8" event={"ID":"62539716-e710-4274-a860-22590e2d5861","Type":"ContainerDied","Data":"99eeb43bf79fd294b7491be8b0341a5fdc1926bb77b8961f93cd8d136d7ede15"}
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.673754 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99eeb43bf79fd294b7491be8b0341a5fdc1926bb77b8961f93cd8d136d7ede15"
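
---- editor's note ----
The UnmountVolume / TearDown / "Volume detached" run above is the kubelet's volume manager reconciling actual state toward desired state: the pod has left the desired world, so every volume still mounted for it gets unmounted (the mirror image of the MountVolume entries earlier). A minimal sketch of that desired-versus-actual loop, with illustrative names rather than the kubelet's real types:

    package main

    import "fmt"

    // reconcile diffs the desired volume set against what is actually
    // mounted: missing volumes get mounted, surplus ones get unmounted.
    func reconcile(desired, actual map[string]bool, mount, unmount func(string)) {
        for v := range desired {
            if !actual[v] {
                mount(v)
            }
        }
        for v := range actual {
            if !desired[v] {
                unmount(v)
            }
        }
    }

    func main() {
        desired := map[string]bool{} // pod deleted: nothing should stay mounted
        actual := map[string]bool{"ssh-key": true, "inventory": true}
        reconcile(desired, actual,
            func(v string) { fmt.Println("MountVolume started for", v) },
            func(v string) { fmt.Println("UnmountVolume started for", v) })
    }

---- end editor's note ----
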
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.673766 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.871904 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"]
Nov 29 04:49:09 crc kubenswrapper[4631]: E1129 04:49:09.872526 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerName="extract-utilities"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.872612 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerName="extract-utilities"
Nov 29 04:49:09 crc kubenswrapper[4631]: E1129 04:49:09.872674 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62539716-e710-4274-a860-22590e2d5861" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.872758 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="62539716-e710-4274-a860-22590e2d5861" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:49:09 crc kubenswrapper[4631]: E1129 04:49:09.872830 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerName="registry-server"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.872883 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerName="registry-server"
Nov 29 04:49:09 crc kubenswrapper[4631]: E1129 04:49:09.872964 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerName="extract-content"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.873017 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerName="extract-content"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.873281 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="0828cad2-e5d0-48d3-bba0-3af3728e4901" containerName="registry-server"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.873393 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="62539716-e710-4274-a860-22590e2d5861" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.874063 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.882683 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.883087 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.883289 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.883608 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.883665 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.891709 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"]
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.918982 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcpjp\" (UniqueName: \"kubernetes.io/projected/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-kube-api-access-xcpjp\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.919072 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.919121 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.919166 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:09 crc kubenswrapper[4631]: I1129 04:49:09.919222 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.020199 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.020265 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.020348 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcpjp\" (UniqueName: \"kubernetes.io/projected/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-kube-api-access-xcpjp\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.020404 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.020440 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.024895 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.025124 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.026037 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.028689 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.038231 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcpjp\" (UniqueName: \"kubernetes.io/projected/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-kube-api-access-xcpjp\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-vr72m\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.212262 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:49:10 crc kubenswrapper[4631]: I1129 04:49:10.755447 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"]
Nov 29 04:49:11 crc kubenswrapper[4631]: I1129 04:49:11.714996 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m" event={"ID":"4c75d7a8-5a02-4b4b-8af4-e83e594a096f","Type":"ContainerStarted","Data":"41a834a0a1c052392a0e59f716635b392e26d076399643b5a10d8cda63bc6d14"}
Nov 29 04:49:11 crc kubenswrapper[4631]: I1129 04:49:11.715546 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m" event={"ID":"4c75d7a8-5a02-4b4b-8af4-e83e594a096f","Type":"ContainerStarted","Data":"9929bb3f2ff19c14a4caeb21f9ca5e15f73a08af5de1919c79497a24d38cf8ab"}
Nov 29 04:49:11 crc kubenswrapper[4631]: I1129 04:49:11.742223 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m" podStartSLOduration=2.3044514449999998 podStartE2EDuration="2.742207866s" podCreationTimestamp="2025-11-29 04:49:09 +0000 UTC" firstStartedPulling="2025-11-29 04:49:10.763841316 +0000 UTC m=+2277.828344850" lastFinishedPulling="2025-11-29 04:49:11.201597727 +0000 UTC m=+2278.266101271" observedRunningTime="2025-11-29 04:49:11.73301696 +0000 UTC m=+2278.797520484" watchObservedRunningTime="2025-11-29 04:49:11.742207866 +0000 UTC m=+2278.806711390"
Nov 29 04:50:20 crc kubenswrapper[4631]: I1129 04:50:20.716824 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 04:50:20 crc kubenswrapper[4631]: I1129 04:50:20.717686 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 04:50:50 crc kubenswrapper[4631]: I1129 04:50:50.715721 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
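
---- editor's note ----
The prober entries above and below are plain HTTP GETs against the container's declared endpoint; "connect: connection refused" means nothing was listening on 127.0.0.1:8798 at probe time. A sketch of the check's shape: Kubernetes documents any status in 200-399 as success for HTTP probes, while a transport error or any other status is a failure (the real prober additionally sets headers and bounds redirects):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probeHTTP mimics an HTTP liveness check: a transport error (such as
    // "dial tcp 127.0.0.1:8798: connect: connection refused") or a status
    // outside 200-399 counts as a probe failure.
    func probeHTTP(url string, timeout time.Duration) error {
        client := &http.Client{Timeout: timeout}
        resp, err := client.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        fmt.Println(probeHTTP("http://127.0.0.1:8798/health", time.Second))
    }

---- end editor's note ----
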
Nov 29 04:50:50 crc kubenswrapper[4631]: I1129 04:50:50.716264 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 04:51:20 crc kubenswrapper[4631]: I1129 04:51:20.715877 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 04:51:20 crc kubenswrapper[4631]: I1129 04:51:20.716625 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 04:51:20 crc kubenswrapper[4631]: I1129 04:51:20.716688 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd"
Nov 29 04:51:20 crc kubenswrapper[4631]: I1129 04:51:20.717814 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 04:51:20 crc kubenswrapper[4631]: I1129 04:51:20.717914 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" gracePeriod=600
Nov 29 04:51:20 crc kubenswrapper[4631]: E1129 04:51:20.849961 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:51:21 crc kubenswrapper[4631]: I1129 04:51:21.183364 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" exitCode=0
Nov 29 04:51:21 crc kubenswrapper[4631]: I1129 04:51:21.183454 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"}
Nov 29 04:51:21 crc kubenswrapper[4631]: I1129 04:51:21.183556 4631 scope.go:117] "RemoveContainer" containerID="91c1e60f055fd84296684a0bad54cde6b8f4cb334611b9aec94a6b3673703fab"
Nov 29 04:51:21 crc kubenswrapper[4631]: I1129 04:51:21.184720 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:51:21 crc kubenswrapper[4631]: E1129 04:51:21.185540 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:51:35 crc kubenswrapper[4631]: I1129 04:51:35.217289 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:51:35 crc kubenswrapper[4631]: E1129 04:51:35.218457 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:51:49 crc kubenswrapper[4631]: I1129 04:51:49.216597 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:51:49 crc kubenswrapper[4631]: E1129 04:51:49.217742 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:52:00 crc kubenswrapper[4631]: I1129 04:52:00.216712 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:52:00 crc kubenswrapper[4631]: E1129 04:52:00.217819 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:52:11 crc kubenswrapper[4631]: I1129 04:52:11.216222 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:52:11 crc kubenswrapper[4631]: E1129 04:52:11.216994 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:52:24 crc kubenswrapper[4631]: I1129 04:52:24.217921 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:52:24 crc kubenswrapper[4631]: E1129 04:52:24.218965 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:52:39 crc kubenswrapper[4631]: I1129 04:52:39.219084 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:52:39 crc kubenswrapper[4631]: E1129 04:52:39.220840 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:52:53 crc kubenswrapper[4631]: I1129 04:52:53.230301 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:52:53 crc kubenswrapper[4631]: E1129 04:52:53.231695 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:53:06 crc kubenswrapper[4631]: I1129 04:53:06.216750 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:53:06 crc kubenswrapper[4631]: E1129 04:53:06.217523 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:53:19 crc kubenswrapper[4631]: I1129 04:53:19.217310 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:53:19 crc kubenswrapper[4631]: E1129 04:53:19.218500 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:53:30 crc kubenswrapper[4631]: I1129 04:53:30.217271 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:53:30 crc kubenswrapper[4631]: E1129 04:53:30.218481 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:53:45 crc kubenswrapper[4631]: I1129 04:53:45.217783 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:53:45 crc kubenswrapper[4631]: E1129 04:53:45.218573 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:54:00 crc kubenswrapper[4631]: I1129 04:54:00.219304 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f"
Nov 29 04:54:00 crc kubenswrapper[4631]: E1129 04:54:00.221526 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 04:54:07 crc kubenswrapper[4631]: I1129 04:54:07.968694 4631 generic.go:334] "Generic (PLEG): container finished" podID="4c75d7a8-5a02-4b4b-8af4-e83e594a096f" containerID="41a834a0a1c052392a0e59f716635b392e26d076399643b5a10d8cda63bc6d14" exitCode=0
Nov 29 04:54:07 crc kubenswrapper[4631]: I1129 04:54:07.968816 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m" event={"ID":"4c75d7a8-5a02-4b4b-8af4-e83e594a096f","Type":"ContainerDied","Data":"41a834a0a1c052392a0e59f716635b392e26d076399643b5a10d8cda63bc6d14"}
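
---- editor's note ----
By this point machine-config-daemon is in CrashLoopBackOff with the restart delay pinned at its cap, so each sync merely logs "back-off 5m0s restarting failed container" and requeues; the entries every ten-odd seconds are resyncs, not restart attempts. The commonly documented kubelet schedule, stated here as an assumption about defaults rather than something read from this cluster's configuration: start at 10s, double after each failed restart, clamp at 5m (and reset after a sufficiently long healthy run). A sketch:

    package main

    import (
        "fmt"
        "time"
    )

    // crashLoopDelays reproduces that assumed schedule: start at base,
    // double per failed restart, clamp at limit. The log above shows the
    // clamped state ("back-off 5m0s").
    func crashLoopDelays(base, limit time.Duration, restarts int) []time.Duration {
        delays := make([]time.Duration, 0, restarts)
        d := base
        for i := 0; i < restarts; i++ {
            delays = append(delays, d)
            if d *= 2; d > limit {
                d = limit
            }
        }
        return delays
    }

    func main() {
        fmt.Println(crashLoopDelays(10*time.Second, 5*time.Minute, 7))
        // [10s 20s 40s 1m20s 2m40s 5m0s 5m0s]
    }

---- end editor's note ----
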
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.429988 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.462318 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-inventory\") pod \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") "
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.462408 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-secret-0\") pod \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") "
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.462442 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-ssh-key\") pod \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") "
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.462484 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcpjp\" (UniqueName: \"kubernetes.io/projected/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-kube-api-access-xcpjp\") pod \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") "
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.462544 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-combined-ca-bundle\") pod \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\" (UID: \"4c75d7a8-5a02-4b4b-8af4-e83e594a096f\") "
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.469442 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-kube-api-access-xcpjp" (OuterVolumeSpecName: "kube-api-access-xcpjp") pod "4c75d7a8-5a02-4b4b-8af4-e83e594a096f" (UID: "4c75d7a8-5a02-4b4b-8af4-e83e594a096f"). InnerVolumeSpecName "kube-api-access-xcpjp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.469546 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "4c75d7a8-5a02-4b4b-8af4-e83e594a096f" (UID: "4c75d7a8-5a02-4b4b-8af4-e83e594a096f"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.490635 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4c75d7a8-5a02-4b4b-8af4-e83e594a096f" (UID: "4c75d7a8-5a02-4b4b-8af4-e83e594a096f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.496682 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "4c75d7a8-5a02-4b4b-8af4-e83e594a096f" (UID: "4c75d7a8-5a02-4b4b-8af4-e83e594a096f"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.502265 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-inventory" (OuterVolumeSpecName: "inventory") pod "4c75d7a8-5a02-4b4b-8af4-e83e594a096f" (UID: "4c75d7a8-5a02-4b4b-8af4-e83e594a096f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.564884 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcpjp\" (UniqueName: \"kubernetes.io/projected/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-kube-api-access-xcpjp\") on node \"crc\" DevicePath \"\""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.564931 4631 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.564954 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-inventory\") on node \"crc\" DevicePath \"\""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.564973 4631 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.564989 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c75d7a8-5a02-4b4b-8af4-e83e594a096f-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.990010 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m" event={"ID":"4c75d7a8-5a02-4b4b-8af4-e83e594a096f","Type":"ContainerDied","Data":"9929bb3f2ff19c14a4caeb21f9ca5e15f73a08af5de1919c79497a24d38cf8ab"}
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.990249 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9929bb3f2ff19c14a4caeb21f9ca5e15f73a08af5de1919c79497a24d38cf8ab"
Nov 29 04:54:09 crc kubenswrapper[4631]: I1129 04:54:09.990391 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-vr72m"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.180805 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"]
Nov 29 04:54:10 crc kubenswrapper[4631]: E1129 04:54:10.181271 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c75d7a8-5a02-4b4b-8af4-e83e594a096f" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.181291 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c75d7a8-5a02-4b4b-8af4-e83e594a096f" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.181577 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c75d7a8-5a02-4b4b-8af4-e83e594a096f" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.182366 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.185283 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.185551 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.186075 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.189377 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.189401 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.189605 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.189752 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.197912 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"]
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.279713 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.279864 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.279907 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.279944 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.280090 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.280146 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.280199 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxwg8\" (UniqueName: \"kubernetes.io/projected/4dc57e41-ec49-4fda-86a7-2d339d19003b-kube-api-access-fxwg8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.280219 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.280764 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.382750 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"
Nov 29 04:54:10 
crc kubenswrapper[4631]: I1129 04:54:10.382800 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.382819 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.382839 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.382887 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.382914 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.382943 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxwg8\" (UniqueName: \"kubernetes.io/projected/4dc57e41-ec49-4fda-86a7-2d339d19003b-kube-api-access-fxwg8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.382963 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.383018 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.384289 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.386397 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.386893 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.387030 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.387849 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.388274 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.389395 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.389528 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.403241 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxwg8\" (UniqueName: 
\"kubernetes.io/projected/4dc57e41-ec49-4fda-86a7-2d339d19003b-kube-api-access-fxwg8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-42rzz\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:10 crc kubenswrapper[4631]: I1129 04:54:10.504852 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:54:11 crc kubenswrapper[4631]: I1129 04:54:11.112548 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz"] Nov 29 04:54:11 crc kubenswrapper[4631]: I1129 04:54:11.122642 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 04:54:11 crc kubenswrapper[4631]: I1129 04:54:11.217617 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:54:11 crc kubenswrapper[4631]: E1129 04:54:11.217888 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:54:12 crc kubenswrapper[4631]: I1129 04:54:12.016795 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" event={"ID":"4dc57e41-ec49-4fda-86a7-2d339d19003b","Type":"ContainerStarted","Data":"e032c2e4631b1dfe28c88f2d83c27d824de014445066e381e545e0e758114d1a"} Nov 29 04:54:12 crc kubenswrapper[4631]: I1129 04:54:12.017181 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" event={"ID":"4dc57e41-ec49-4fda-86a7-2d339d19003b","Type":"ContainerStarted","Data":"8ebc91283b60e9671adce8a2ce6017dc809615d76c421aaf289e1a62f2fc7b73"} Nov 29 04:54:12 crc kubenswrapper[4631]: I1129 04:54:12.058093 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" podStartSLOduration=1.595484043 podStartE2EDuration="2.05800196s" podCreationTimestamp="2025-11-29 04:54:10 +0000 UTC" firstStartedPulling="2025-11-29 04:54:11.122457626 +0000 UTC m=+2578.186961140" lastFinishedPulling="2025-11-29 04:54:11.584975533 +0000 UTC m=+2578.649479057" observedRunningTime="2025-11-29 04:54:12.051730105 +0000 UTC m=+2579.116233659" watchObservedRunningTime="2025-11-29 04:54:12.05800196 +0000 UTC m=+2579.122505504" Nov 29 04:54:23 crc kubenswrapper[4631]: I1129 04:54:23.229823 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:54:23 crc kubenswrapper[4631]: E1129 04:54:23.230926 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:54:35 crc kubenswrapper[4631]: I1129 04:54:35.217744 4631 scope.go:117] 
"RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:54:35 crc kubenswrapper[4631]: E1129 04:54:35.218840 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.456700 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kjk2b"] Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.477049 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.498119 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kjk2b"] Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.597598 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-utilities\") pod \"redhat-operators-kjk2b\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.597668 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-catalog-content\") pod \"redhat-operators-kjk2b\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.597715 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jzp2\" (UniqueName: \"kubernetes.io/projected/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-kube-api-access-8jzp2\") pod \"redhat-operators-kjk2b\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.699620 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-catalog-content\") pod \"redhat-operators-kjk2b\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.699686 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jzp2\" (UniqueName: \"kubernetes.io/projected/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-kube-api-access-8jzp2\") pod \"redhat-operators-kjk2b\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.699800 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-utilities\") pod \"redhat-operators-kjk2b\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc 
kubenswrapper[4631]: I1129 04:54:44.700119 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-catalog-content\") pod \"redhat-operators-kjk2b\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.700435 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-utilities\") pod \"redhat-operators-kjk2b\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.722021 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jzp2\" (UniqueName: \"kubernetes.io/projected/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-kube-api-access-8jzp2\") pod \"redhat-operators-kjk2b\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:44 crc kubenswrapper[4631]: I1129 04:54:44.835756 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:45 crc kubenswrapper[4631]: I1129 04:54:45.312455 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kjk2b"] Nov 29 04:54:45 crc kubenswrapper[4631]: I1129 04:54:45.410221 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjk2b" event={"ID":"abd78264-abea-4f33-b4c7-ffdbcbe3c65e","Type":"ContainerStarted","Data":"592fbe61b400a767c423e598250b7d662b971394eed52d35ae7f718c9032248f"} Nov 29 04:54:46 crc kubenswrapper[4631]: I1129 04:54:46.424731 4631 generic.go:334] "Generic (PLEG): container finished" podID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerID="95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d" exitCode=0 Nov 29 04:54:46 crc kubenswrapper[4631]: I1129 04:54:46.425035 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjk2b" event={"ID":"abd78264-abea-4f33-b4c7-ffdbcbe3c65e","Type":"ContainerDied","Data":"95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d"} Nov 29 04:54:48 crc kubenswrapper[4631]: I1129 04:54:48.946709 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjk2b" event={"ID":"abd78264-abea-4f33-b4c7-ffdbcbe3c65e","Type":"ContainerStarted","Data":"7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01"} Nov 29 04:54:50 crc kubenswrapper[4631]: I1129 04:54:50.216244 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:54:50 crc kubenswrapper[4631]: E1129 04:54:50.217014 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:54:51 crc kubenswrapper[4631]: I1129 04:54:51.980535 4631 generic.go:334] "Generic (PLEG): container finished" podID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" 
containerID="7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01" exitCode=0 Nov 29 04:54:51 crc kubenswrapper[4631]: I1129 04:54:51.981173 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjk2b" event={"ID":"abd78264-abea-4f33-b4c7-ffdbcbe3c65e","Type":"ContainerDied","Data":"7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01"} Nov 29 04:54:52 crc kubenswrapper[4631]: I1129 04:54:52.995590 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjk2b" event={"ID":"abd78264-abea-4f33-b4c7-ffdbcbe3c65e","Type":"ContainerStarted","Data":"a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2"} Nov 29 04:54:53 crc kubenswrapper[4631]: I1129 04:54:53.026352 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kjk2b" podStartSLOduration=2.865606163 podStartE2EDuration="9.026317681s" podCreationTimestamp="2025-11-29 04:54:44 +0000 UTC" firstStartedPulling="2025-11-29 04:54:46.431465394 +0000 UTC m=+2613.495968918" lastFinishedPulling="2025-11-29 04:54:52.592176912 +0000 UTC m=+2619.656680436" observedRunningTime="2025-11-29 04:54:53.019871662 +0000 UTC m=+2620.084375186" watchObservedRunningTime="2025-11-29 04:54:53.026317681 +0000 UTC m=+2620.090821205" Nov 29 04:54:54 crc kubenswrapper[4631]: I1129 04:54:54.836878 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:54 crc kubenswrapper[4631]: I1129 04:54:54.837207 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:54:55 crc kubenswrapper[4631]: I1129 04:54:55.892463 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kjk2b" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerName="registry-server" probeResult="failure" output=< Nov 29 04:54:55 crc kubenswrapper[4631]: timeout: failed to connect service ":50051" within 1s Nov 29 04:54:55 crc kubenswrapper[4631]: > Nov 29 04:55:04 crc kubenswrapper[4631]: I1129 04:55:04.899172 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:55:04 crc kubenswrapper[4631]: I1129 04:55:04.949466 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:55:05 crc kubenswrapper[4631]: I1129 04:55:05.143822 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kjk2b"] Nov 29 04:55:05 crc kubenswrapper[4631]: I1129 04:55:05.216676 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:55:05 crc kubenswrapper[4631]: E1129 04:55:05.217122 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.115818 4631 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-operators-kjk2b" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerName="registry-server" containerID="cri-o://a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2" gracePeriod=2 Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.601594 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.757674 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-utilities\") pod \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.757755 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jzp2\" (UniqueName: \"kubernetes.io/projected/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-kube-api-access-8jzp2\") pod \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.757784 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-catalog-content\") pod \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\" (UID: \"abd78264-abea-4f33-b4c7-ffdbcbe3c65e\") " Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.759089 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-utilities" (OuterVolumeSpecName: "utilities") pod "abd78264-abea-4f33-b4c7-ffdbcbe3c65e" (UID: "abd78264-abea-4f33-b4c7-ffdbcbe3c65e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.765968 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-kube-api-access-8jzp2" (OuterVolumeSpecName: "kube-api-access-8jzp2") pod "abd78264-abea-4f33-b4c7-ffdbcbe3c65e" (UID: "abd78264-abea-4f33-b4c7-ffdbcbe3c65e"). InnerVolumeSpecName "kube-api-access-8jzp2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.861028 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.861061 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jzp2\" (UniqueName: \"kubernetes.io/projected/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-kube-api-access-8jzp2\") on node \"crc\" DevicePath \"\"" Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.885111 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "abd78264-abea-4f33-b4c7-ffdbcbe3c65e" (UID: "abd78264-abea-4f33-b4c7-ffdbcbe3c65e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:55:06 crc kubenswrapper[4631]: I1129 04:55:06.962547 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abd78264-abea-4f33-b4c7-ffdbcbe3c65e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.128156 4631 generic.go:334] "Generic (PLEG): container finished" podID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerID="a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2" exitCode=0 Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.128197 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjk2b" event={"ID":"abd78264-abea-4f33-b4c7-ffdbcbe3c65e","Type":"ContainerDied","Data":"a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2"} Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.128223 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kjk2b" event={"ID":"abd78264-abea-4f33-b4c7-ffdbcbe3c65e","Type":"ContainerDied","Data":"592fbe61b400a767c423e598250b7d662b971394eed52d35ae7f718c9032248f"} Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.128229 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kjk2b" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.128238 4631 scope.go:117] "RemoveContainer" containerID="a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.159419 4631 scope.go:117] "RemoveContainer" containerID="7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.174289 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kjk2b"] Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.184922 4631 scope.go:117] "RemoveContainer" containerID="95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.191970 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kjk2b"] Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.223531 4631 scope.go:117] "RemoveContainer" containerID="a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2" Nov 29 04:55:07 crc kubenswrapper[4631]: E1129 04:55:07.227596 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2\": container with ID starting with a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2 not found: ID does not exist" containerID="a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.227638 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2"} err="failed to get container status \"a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2\": rpc error: code = NotFound desc = could not find container \"a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2\": container with ID starting with a7c9c4ebbe60d7dd088d3f865ec043d53ecd46c46b931f5b47cc77af0f95b6f2 not found: ID does not exist" Nov 29 04:55:07 crc 
kubenswrapper[4631]: I1129 04:55:07.227664 4631 scope.go:117] "RemoveContainer" containerID="7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01" Nov 29 04:55:07 crc kubenswrapper[4631]: E1129 04:55:07.232790 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01\": container with ID starting with 7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01 not found: ID does not exist" containerID="7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.232853 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01"} err="failed to get container status \"7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01\": rpc error: code = NotFound desc = could not find container \"7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01\": container with ID starting with 7259a342f98932dcd29fda4477fc75afddaff11bd4be3db9be66eebe1f814d01 not found: ID does not exist" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.232897 4631 scope.go:117] "RemoveContainer" containerID="95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d" Nov 29 04:55:07 crc kubenswrapper[4631]: E1129 04:55:07.233523 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d\": container with ID starting with 95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d not found: ID does not exist" containerID="95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.233567 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d"} err="failed to get container status \"95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d\": rpc error: code = NotFound desc = could not find container \"95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d\": container with ID starting with 95cf012ac5e0bfc99358c68e3bf8aa321c02e2a637ac7a78b4b6e273fc0bfa0d not found: ID does not exist" Nov 29 04:55:07 crc kubenswrapper[4631]: I1129 04:55:07.240032 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" path="/var/lib/kubelet/pods/abd78264-abea-4f33-b4c7-ffdbcbe3c65e/volumes" Nov 29 04:55:17 crc kubenswrapper[4631]: I1129 04:55:17.217018 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:55:17 crc kubenswrapper[4631]: E1129 04:55:17.217733 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:55:32 crc kubenswrapper[4631]: I1129 04:55:32.216961 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" 
Nov 29 04:55:32 crc kubenswrapper[4631]: E1129 04:55:32.217676 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.754079 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sqckp"] Nov 29 04:55:41 crc kubenswrapper[4631]: E1129 04:55:41.756499 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerName="registry-server" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.756610 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerName="registry-server" Nov 29 04:55:41 crc kubenswrapper[4631]: E1129 04:55:41.756696 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerName="extract-utilities" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.756768 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerName="extract-utilities" Nov 29 04:55:41 crc kubenswrapper[4631]: E1129 04:55:41.756848 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerName="extract-content" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.756922 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerName="extract-content" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.757251 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="abd78264-abea-4f33-b4c7-ffdbcbe3c65e" containerName="registry-server" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.758706 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.784099 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sqckp"] Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.819072 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fad24c1-8766-4462-a5fb-997b472d2952-utilities\") pod \"community-operators-sqckp\" (UID: \"6fad24c1-8766-4462-a5fb-997b472d2952\") " pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.819154 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fad24c1-8766-4462-a5fb-997b472d2952-catalog-content\") pod \"community-operators-sqckp\" (UID: \"6fad24c1-8766-4462-a5fb-997b472d2952\") " pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.819247 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-486rs\" (UniqueName: \"kubernetes.io/projected/6fad24c1-8766-4462-a5fb-997b472d2952-kube-api-access-486rs\") pod \"community-operators-sqckp\" (UID: \"6fad24c1-8766-4462-a5fb-997b472d2952\") " pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.946642 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fad24c1-8766-4462-a5fb-997b472d2952-catalog-content\") pod \"community-operators-sqckp\" (UID: \"6fad24c1-8766-4462-a5fb-997b472d2952\") " pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.946737 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-486rs\" (UniqueName: \"kubernetes.io/projected/6fad24c1-8766-4462-a5fb-997b472d2952-kube-api-access-486rs\") pod \"community-operators-sqckp\" (UID: \"6fad24c1-8766-4462-a5fb-997b472d2952\") " pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.946824 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fad24c1-8766-4462-a5fb-997b472d2952-utilities\") pod \"community-operators-sqckp\" (UID: \"6fad24c1-8766-4462-a5fb-997b472d2952\") " pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.947291 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fad24c1-8766-4462-a5fb-997b472d2952-utilities\") pod \"community-operators-sqckp\" (UID: \"6fad24c1-8766-4462-a5fb-997b472d2952\") " pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.947548 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fad24c1-8766-4462-a5fb-997b472d2952-catalog-content\") pod \"community-operators-sqckp\" (UID: \"6fad24c1-8766-4462-a5fb-997b472d2952\") " pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:41 crc kubenswrapper[4631]: I1129 04:55:41.982367 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-486rs\" (UniqueName: \"kubernetes.io/projected/6fad24c1-8766-4462-a5fb-997b472d2952-kube-api-access-486rs\") pod \"community-operators-sqckp\" (UID: \"6fad24c1-8766-4462-a5fb-997b472d2952\") " pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:42 crc kubenswrapper[4631]: I1129 04:55:42.081312 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:42 crc kubenswrapper[4631]: I1129 04:55:42.423411 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sqckp"] Nov 29 04:55:42 crc kubenswrapper[4631]: I1129 04:55:42.503469 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sqckp" event={"ID":"6fad24c1-8766-4462-a5fb-997b472d2952","Type":"ContainerStarted","Data":"e4b30a4039356685fd9d68eecc41ce6871e6c8333e62ebb94f5f44b8aa2f1c01"} Nov 29 04:55:43 crc kubenswrapper[4631]: I1129 04:55:43.514714 4631 generic.go:334] "Generic (PLEG): container finished" podID="6fad24c1-8766-4462-a5fb-997b472d2952" containerID="158511335c167dedb3e226dc4b52eb60c6961126e088a9e37e7c22ff383aa2f3" exitCode=0 Nov 29 04:55:43 crc kubenswrapper[4631]: I1129 04:55:43.514980 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sqckp" event={"ID":"6fad24c1-8766-4462-a5fb-997b472d2952","Type":"ContainerDied","Data":"158511335c167dedb3e226dc4b52eb60c6961126e088a9e37e7c22ff383aa2f3"} Nov 29 04:55:47 crc kubenswrapper[4631]: I1129 04:55:47.217746 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:55:47 crc kubenswrapper[4631]: E1129 04:55:47.219088 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:55:48 crc kubenswrapper[4631]: I1129 04:55:48.567751 4631 generic.go:334] "Generic (PLEG): container finished" podID="6fad24c1-8766-4462-a5fb-997b472d2952" containerID="d6a646c3d8a6988f831707b9c62166313a2e3107d3622f29b587f6478cf8be92" exitCode=0 Nov 29 04:55:48 crc kubenswrapper[4631]: I1129 04:55:48.567821 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sqckp" event={"ID":"6fad24c1-8766-4462-a5fb-997b472d2952","Type":"ContainerDied","Data":"d6a646c3d8a6988f831707b9c62166313a2e3107d3622f29b587f6478cf8be92"} Nov 29 04:55:49 crc kubenswrapper[4631]: I1129 04:55:49.576702 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sqckp" event={"ID":"6fad24c1-8766-4462-a5fb-997b472d2952","Type":"ContainerStarted","Data":"6f189743b9ce246e9b2ff956b5e849922b2d6e90bae96ac309420bc7b78ffb7b"} Nov 29 04:55:49 crc kubenswrapper[4631]: I1129 04:55:49.626991 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sqckp" podStartSLOduration=3.070207671 podStartE2EDuration="8.626972571s" podCreationTimestamp="2025-11-29 04:55:41 +0000 UTC" firstStartedPulling="2025-11-29 04:55:43.517261493 +0000 UTC m=+2670.581765017" 
lastFinishedPulling="2025-11-29 04:55:49.074026393 +0000 UTC m=+2676.138529917" observedRunningTime="2025-11-29 04:55:49.614543474 +0000 UTC m=+2676.679046998" watchObservedRunningTime="2025-11-29 04:55:49.626972571 +0000 UTC m=+2676.691476085" Nov 29 04:55:52 crc kubenswrapper[4631]: I1129 04:55:52.082417 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:52 crc kubenswrapper[4631]: I1129 04:55:52.084425 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:55:52 crc kubenswrapper[4631]: I1129 04:55:52.164440 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:56:00 crc kubenswrapper[4631]: I1129 04:56:00.216996 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:56:00 crc kubenswrapper[4631]: E1129 04:56:00.218061 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:56:02 crc kubenswrapper[4631]: I1129 04:56:02.175583 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sqckp" Nov 29 04:56:02 crc kubenswrapper[4631]: I1129 04:56:02.287116 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sqckp"] Nov 29 04:56:02 crc kubenswrapper[4631]: I1129 04:56:02.336368 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5gtpk"] Nov 29 04:56:02 crc kubenswrapper[4631]: I1129 04:56:02.337442 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5gtpk" podUID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerName="registry-server" containerID="cri-o://fc383deb83a72fdbc9a3b4653d39c74a1dbc133069ed04527a2ee87c429aa4a0" gracePeriod=2 Nov 29 04:56:02 crc kubenswrapper[4631]: I1129 04:56:02.728718 4631 generic.go:334] "Generic (PLEG): container finished" podID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerID="fc383deb83a72fdbc9a3b4653d39c74a1dbc133069ed04527a2ee87c429aa4a0" exitCode=0 Nov 29 04:56:02 crc kubenswrapper[4631]: I1129 04:56:02.728793 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5gtpk" event={"ID":"9f2dce73-ee2d-4566-bdc2-e5daf964ca77","Type":"ContainerDied","Data":"fc383deb83a72fdbc9a3b4653d39c74a1dbc133069ed04527a2ee87c429aa4a0"} Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.386632 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.532021 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-catalog-content\") pod \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.532103 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hddsx\" (UniqueName: \"kubernetes.io/projected/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-kube-api-access-hddsx\") pod \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.532313 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-utilities\") pod \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\" (UID: \"9f2dce73-ee2d-4566-bdc2-e5daf964ca77\") " Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.533115 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-utilities" (OuterVolumeSpecName: "utilities") pod "9f2dce73-ee2d-4566-bdc2-e5daf964ca77" (UID: "9f2dce73-ee2d-4566-bdc2-e5daf964ca77"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.542489 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-kube-api-access-hddsx" (OuterVolumeSpecName: "kube-api-access-hddsx") pod "9f2dce73-ee2d-4566-bdc2-e5daf964ca77" (UID: "9f2dce73-ee2d-4566-bdc2-e5daf964ca77"). InnerVolumeSpecName "kube-api-access-hddsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.583261 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9f2dce73-ee2d-4566-bdc2-e5daf964ca77" (UID: "9f2dce73-ee2d-4566-bdc2-e5daf964ca77"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.634185 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.634217 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.634229 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hddsx\" (UniqueName: \"kubernetes.io/projected/9f2dce73-ee2d-4566-bdc2-e5daf964ca77-kube-api-access-hddsx\") on node \"crc\" DevicePath \"\"" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.739860 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5gtpk" event={"ID":"9f2dce73-ee2d-4566-bdc2-e5daf964ca77","Type":"ContainerDied","Data":"d4fc6bbd4dc9cdb4c0ebde132c730af4e5d5d94b63fc62f9329b1c4d61c1f9b8"} Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.739917 4631 scope.go:117] "RemoveContainer" containerID="fc383deb83a72fdbc9a3b4653d39c74a1dbc133069ed04527a2ee87c429aa4a0" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.739922 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5gtpk" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.762438 4631 scope.go:117] "RemoveContainer" containerID="f71ab882924f8d98bb9a4d23877ba3bdded0e7032c8b5f7a67f70b3125626613" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.816338 4631 scope.go:117] "RemoveContainer" containerID="c81274a7158ac0505b263d6ffdaca587dbaa723f742d5e8bc09b541c02132ece" Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.816469 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5gtpk"] Nov 29 04:56:03 crc kubenswrapper[4631]: I1129 04:56:03.826581 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5gtpk"] Nov 29 04:56:05 crc kubenswrapper[4631]: I1129 04:56:05.227063 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" path="/var/lib/kubelet/pods/9f2dce73-ee2d-4566-bdc2-e5daf964ca77/volumes" Nov 29 04:56:15 crc kubenswrapper[4631]: I1129 04:56:15.217623 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:56:15 crc kubenswrapper[4631]: E1129 04:56:15.218546 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 04:56:30 crc kubenswrapper[4631]: I1129 04:56:30.217181 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 04:56:31 crc kubenswrapper[4631]: I1129 04:56:31.074201 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"b7fb9a7e18ea2e496f485c0bfb951b874a500fca1fbcb82c5e3f2b282902676a"} Nov 29 04:57:33 crc kubenswrapper[4631]: I1129 04:57:33.697670 4631 generic.go:334] "Generic (PLEG): container finished" podID="4dc57e41-ec49-4fda-86a7-2d339d19003b" containerID="e032c2e4631b1dfe28c88f2d83c27d824de014445066e381e545e0e758114d1a" exitCode=0 Nov 29 04:57:33 crc kubenswrapper[4631]: I1129 04:57:33.697732 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" event={"ID":"4dc57e41-ec49-4fda-86a7-2d339d19003b","Type":"ContainerDied","Data":"e032c2e4631b1dfe28c88f2d83c27d824de014445066e381e545e0e758114d1a"} Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.206428 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.371232 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-0\") pod \"4dc57e41-ec49-4fda-86a7-2d339d19003b\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.371563 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-extra-config-0\") pod \"4dc57e41-ec49-4fda-86a7-2d339d19003b\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.371635 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-1\") pod \"4dc57e41-ec49-4fda-86a7-2d339d19003b\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.371682 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-0\") pod \"4dc57e41-ec49-4fda-86a7-2d339d19003b\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.371709 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-inventory\") pod \"4dc57e41-ec49-4fda-86a7-2d339d19003b\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.371769 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-1\") pod \"4dc57e41-ec49-4fda-86a7-2d339d19003b\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.371805 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxwg8\" (UniqueName: \"kubernetes.io/projected/4dc57e41-ec49-4fda-86a7-2d339d19003b-kube-api-access-fxwg8\") pod \"4dc57e41-ec49-4fda-86a7-2d339d19003b\" (UID: 
\"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.371867 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-ssh-key\") pod \"4dc57e41-ec49-4fda-86a7-2d339d19003b\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.371903 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-combined-ca-bundle\") pod \"4dc57e41-ec49-4fda-86a7-2d339d19003b\" (UID: \"4dc57e41-ec49-4fda-86a7-2d339d19003b\") " Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.394051 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dc57e41-ec49-4fda-86a7-2d339d19003b-kube-api-access-fxwg8" (OuterVolumeSpecName: "kube-api-access-fxwg8") pod "4dc57e41-ec49-4fda-86a7-2d339d19003b" (UID: "4dc57e41-ec49-4fda-86a7-2d339d19003b"). InnerVolumeSpecName "kube-api-access-fxwg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.397393 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-inventory" (OuterVolumeSpecName: "inventory") pod "4dc57e41-ec49-4fda-86a7-2d339d19003b" (UID: "4dc57e41-ec49-4fda-86a7-2d339d19003b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.402770 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4dc57e41-ec49-4fda-86a7-2d339d19003b" (UID: "4dc57e41-ec49-4fda-86a7-2d339d19003b"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.413627 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4dc57e41-ec49-4fda-86a7-2d339d19003b" (UID: "4dc57e41-ec49-4fda-86a7-2d339d19003b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.414053 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "4dc57e41-ec49-4fda-86a7-2d339d19003b" (UID: "4dc57e41-ec49-4fda-86a7-2d339d19003b"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.414616 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "4dc57e41-ec49-4fda-86a7-2d339d19003b" (UID: "4dc57e41-ec49-4fda-86a7-2d339d19003b"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.419692 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "4dc57e41-ec49-4fda-86a7-2d339d19003b" (UID: "4dc57e41-ec49-4fda-86a7-2d339d19003b"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.439140 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "4dc57e41-ec49-4fda-86a7-2d339d19003b" (UID: "4dc57e41-ec49-4fda-86a7-2d339d19003b"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.439239 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "4dc57e41-ec49-4fda-86a7-2d339d19003b" (UID: "4dc57e41-ec49-4fda-86a7-2d339d19003b"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.473955 4631 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.473994 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-inventory\") on node \"crc\" DevicePath \"\"" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.474007 4631 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.474024 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxwg8\" (UniqueName: \"kubernetes.io/projected/4dc57e41-ec49-4fda-86a7-2d339d19003b-kube-api-access-fxwg8\") on node \"crc\" DevicePath \"\"" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.474038 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.474050 4631 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.474062 4631 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.474076 4631 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: 
\"kubernetes.io/configmap/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.474090 4631 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4dc57e41-ec49-4fda-86a7-2d339d19003b-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.719311 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" event={"ID":"4dc57e41-ec49-4fda-86a7-2d339d19003b","Type":"ContainerDied","Data":"8ebc91283b60e9671adce8a2ce6017dc809615d76c421aaf289e1a62f2fc7b73"} Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.719373 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ebc91283b60e9671adce8a2ce6017dc809615d76c421aaf289e1a62f2fc7b73" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.719429 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-42rzz" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.858101 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc"] Nov 29 04:57:35 crc kubenswrapper[4631]: E1129 04:57:35.858532 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerName="extract-content" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.858554 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerName="extract-content" Nov 29 04:57:35 crc kubenswrapper[4631]: E1129 04:57:35.858578 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dc57e41-ec49-4fda-86a7-2d339d19003b" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.858588 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dc57e41-ec49-4fda-86a7-2d339d19003b" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 29 04:57:35 crc kubenswrapper[4631]: E1129 04:57:35.858622 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerName="extract-utilities" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.858632 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerName="extract-utilities" Nov 29 04:57:35 crc kubenswrapper[4631]: E1129 04:57:35.858648 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerName="registry-server" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.858655 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerName="registry-server" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.858882 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f2dce73-ee2d-4566-bdc2-e5daf964ca77" containerName="registry-server" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.858907 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dc57e41-ec49-4fda-86a7-2d339d19003b" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.859601 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.862885 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.862989 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.863076 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.863239 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-hf652" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.863251 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.873139 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc"] Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.983077 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.983133 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.983205 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.983242 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.983288 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzqlq\" (UniqueName: \"kubernetes.io/projected/38877ce3-8e06-44be-9be6-4abb374c32fa-kube-api-access-nzqlq\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 
04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.983316 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:35 crc kubenswrapper[4631]: I1129 04:57:35.983346 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.085477 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.085791 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.085873 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzqlq\" (UniqueName: \"kubernetes.io/projected/38877ce3-8e06-44be-9be6-4abb374c32fa-kube-api-access-nzqlq\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.085911 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.085929 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.086093 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.086978 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.092051 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.093396 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.093814 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.094438 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.098675 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.104528 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzqlq\" (UniqueName: \"kubernetes.io/projected/38877ce3-8e06-44be-9be6-4abb374c32fa-kube-api-access-nzqlq\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.108869 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc\" (UID: 
\"38877ce3-8e06-44be-9be6-4abb374c32fa\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.211834 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" Nov 29 04:57:36 crc kubenswrapper[4631]: W1129 04:57:36.814477 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38877ce3_8e06_44be_9be6_4abb374c32fa.slice/crio-9a3a9ede66973a4f481da7c5d3f9c16c518d82a35b6144b5d5e31052c33641bf WatchSource:0}: Error finding container 9a3a9ede66973a4f481da7c5d3f9c16c518d82a35b6144b5d5e31052c33641bf: Status 404 returned error can't find the container with id 9a3a9ede66973a4f481da7c5d3f9c16c518d82a35b6144b5d5e31052c33641bf Nov 29 04:57:36 crc kubenswrapper[4631]: I1129 04:57:36.818193 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc"] Nov 29 04:57:37 crc kubenswrapper[4631]: I1129 04:57:37.739762 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" event={"ID":"38877ce3-8e06-44be-9be6-4abb374c32fa","Type":"ContainerStarted","Data":"a6cc199cc0acc53de8cb0b3a2061a1aad58a10cb774cc2068721fde99671a60c"} Nov 29 04:57:37 crc kubenswrapper[4631]: I1129 04:57:37.740484 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" event={"ID":"38877ce3-8e06-44be-9be6-4abb374c32fa","Type":"ContainerStarted","Data":"9a3a9ede66973a4f481da7c5d3f9c16c518d82a35b6144b5d5e31052c33641bf"} Nov 29 04:57:37 crc kubenswrapper[4631]: I1129 04:57:37.764705 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" podStartSLOduration=2.220784737 podStartE2EDuration="2.764678947s" podCreationTimestamp="2025-11-29 04:57:35 +0000 UTC" firstStartedPulling="2025-11-29 04:57:36.831414575 +0000 UTC m=+2783.895918089" lastFinishedPulling="2025-11-29 04:57:37.375308765 +0000 UTC m=+2784.439812299" observedRunningTime="2025-11-29 04:57:37.755774788 +0000 UTC m=+2784.820278342" watchObservedRunningTime="2025-11-29 04:57:37.764678947 +0000 UTC m=+2784.829182491" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.149128 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4lslk"] Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.153182 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.163890 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4lslk"] Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.263260 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-catalog-content\") pod \"redhat-marketplace-4lslk\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.263306 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-utilities\") pod \"redhat-marketplace-4lslk\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.263372 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnwdz\" (UniqueName: \"kubernetes.io/projected/d014ca7f-372e-4117-82b4-676b4368a1d7-kube-api-access-tnwdz\") pod \"redhat-marketplace-4lslk\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.364766 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-catalog-content\") pod \"redhat-marketplace-4lslk\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.364821 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-utilities\") pod \"redhat-marketplace-4lslk\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.364870 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnwdz\" (UniqueName: \"kubernetes.io/projected/d014ca7f-372e-4117-82b4-676b4368a1d7-kube-api-access-tnwdz\") pod \"redhat-marketplace-4lslk\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.365434 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-catalog-content\") pod \"redhat-marketplace-4lslk\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.365500 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-utilities\") pod \"redhat-marketplace-4lslk\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.392089 4631 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-tnwdz\" (UniqueName: \"kubernetes.io/projected/d014ca7f-372e-4117-82b4-676b4368a1d7-kube-api-access-tnwdz\") pod \"redhat-marketplace-4lslk\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:20 crc kubenswrapper[4631]: I1129 04:58:20.475430 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:21 crc kubenswrapper[4631]: I1129 04:58:21.037008 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4lslk"] Nov 29 04:58:21 crc kubenswrapper[4631]: I1129 04:58:21.206921 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4lslk" event={"ID":"d014ca7f-372e-4117-82b4-676b4368a1d7","Type":"ContainerStarted","Data":"ede90333425914f6c8e6096aa7f59898abc613c3d79b4335b7ddefb66e55e003"} Nov 29 04:58:22 crc kubenswrapper[4631]: I1129 04:58:22.222318 4631 generic.go:334] "Generic (PLEG): container finished" podID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerID="d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91" exitCode=0 Nov 29 04:58:22 crc kubenswrapper[4631]: I1129 04:58:22.222475 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4lslk" event={"ID":"d014ca7f-372e-4117-82b4-676b4368a1d7","Type":"ContainerDied","Data":"d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91"} Nov 29 04:58:24 crc kubenswrapper[4631]: I1129 04:58:24.255315 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4lslk" event={"ID":"d014ca7f-372e-4117-82b4-676b4368a1d7","Type":"ContainerStarted","Data":"b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0"} Nov 29 04:58:26 crc kubenswrapper[4631]: I1129 04:58:26.304271 4631 generic.go:334] "Generic (PLEG): container finished" podID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerID="b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0" exitCode=0 Nov 29 04:58:26 crc kubenswrapper[4631]: I1129 04:58:26.304403 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4lslk" event={"ID":"d014ca7f-372e-4117-82b4-676b4368a1d7","Type":"ContainerDied","Data":"b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0"} Nov 29 04:58:27 crc kubenswrapper[4631]: I1129 04:58:27.318420 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4lslk" event={"ID":"d014ca7f-372e-4117-82b4-676b4368a1d7","Type":"ContainerStarted","Data":"65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb"} Nov 29 04:58:27 crc kubenswrapper[4631]: I1129 04:58:27.345589 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4lslk" podStartSLOduration=2.562455471 podStartE2EDuration="7.34556691s" podCreationTimestamp="2025-11-29 04:58:20 +0000 UTC" firstStartedPulling="2025-11-29 04:58:22.22518716 +0000 UTC m=+2829.289690684" lastFinishedPulling="2025-11-29 04:58:27.008298609 +0000 UTC m=+2834.072802123" observedRunningTime="2025-11-29 04:58:27.337274117 +0000 UTC m=+2834.401777631" watchObservedRunningTime="2025-11-29 04:58:27.34556691 +0000 UTC m=+2834.410070444" Nov 29 04:58:30 crc kubenswrapper[4631]: I1129 04:58:30.476513 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:30 crc kubenswrapper[4631]: I1129 04:58:30.477229 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:30 crc kubenswrapper[4631]: I1129 04:58:30.527625 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:40 crc kubenswrapper[4631]: I1129 04:58:40.535094 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:40 crc kubenswrapper[4631]: I1129 04:58:40.587322 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4lslk"] Nov 29 04:58:41 crc kubenswrapper[4631]: I1129 04:58:41.453502 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4lslk" podUID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerName="registry-server" containerID="cri-o://65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb" gracePeriod=2 Nov 29 04:58:41 crc kubenswrapper[4631]: I1129 04:58:41.952030 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.003033 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-catalog-content\") pod \"d014ca7f-372e-4117-82b4-676b4368a1d7\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.003161 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnwdz\" (UniqueName: \"kubernetes.io/projected/d014ca7f-372e-4117-82b4-676b4368a1d7-kube-api-access-tnwdz\") pod \"d014ca7f-372e-4117-82b4-676b4368a1d7\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.003201 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-utilities\") pod \"d014ca7f-372e-4117-82b4-676b4368a1d7\" (UID: \"d014ca7f-372e-4117-82b4-676b4368a1d7\") " Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.004173 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-utilities" (OuterVolumeSpecName: "utilities") pod "d014ca7f-372e-4117-82b4-676b4368a1d7" (UID: "d014ca7f-372e-4117-82b4-676b4368a1d7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.010618 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d014ca7f-372e-4117-82b4-676b4368a1d7-kube-api-access-tnwdz" (OuterVolumeSpecName: "kube-api-access-tnwdz") pod "d014ca7f-372e-4117-82b4-676b4368a1d7" (UID: "d014ca7f-372e-4117-82b4-676b4368a1d7"). InnerVolumeSpecName "kube-api-access-tnwdz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.023823 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d014ca7f-372e-4117-82b4-676b4368a1d7" (UID: "d014ca7f-372e-4117-82b4-676b4368a1d7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.104654 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnwdz\" (UniqueName: \"kubernetes.io/projected/d014ca7f-372e-4117-82b4-676b4368a1d7-kube-api-access-tnwdz\") on node \"crc\" DevicePath \"\"" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.104678 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.104689 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d014ca7f-372e-4117-82b4-676b4368a1d7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.463012 4631 generic.go:334] "Generic (PLEG): container finished" podID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerID="65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb" exitCode=0 Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.463064 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4lslk" event={"ID":"d014ca7f-372e-4117-82b4-676b4368a1d7","Type":"ContainerDied","Data":"65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb"} Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.463082 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4lslk" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.463106 4631 scope.go:117] "RemoveContainer" containerID="65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.463094 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4lslk" event={"ID":"d014ca7f-372e-4117-82b4-676b4368a1d7","Type":"ContainerDied","Data":"ede90333425914f6c8e6096aa7f59898abc613c3d79b4335b7ddefb66e55e003"} Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.485919 4631 scope.go:117] "RemoveContainer" containerID="b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.502135 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4lslk"] Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.508319 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4lslk"] Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.530952 4631 scope.go:117] "RemoveContainer" containerID="d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.553618 4631 scope.go:117] "RemoveContainer" containerID="65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb" Nov 29 04:58:42 crc kubenswrapper[4631]: E1129 04:58:42.554028 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb\": container with ID starting with 65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb not found: ID does not exist" containerID="65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.554056 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb"} err="failed to get container status \"65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb\": rpc error: code = NotFound desc = could not find container \"65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb\": container with ID starting with 65cbc62aa6f64b54ef9704dabc24b85cbf2815ee3c2aace971289fb11adcb6eb not found: ID does not exist" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.554076 4631 scope.go:117] "RemoveContainer" containerID="b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0" Nov 29 04:58:42 crc kubenswrapper[4631]: E1129 04:58:42.554276 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0\": container with ID starting with b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0 not found: ID does not exist" containerID="b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.554302 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0"} err="failed to get container status \"b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0\": rpc error: code = NotFound desc = could not find 
container \"b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0\": container with ID starting with b1f34b58ed1bfaf0a657540b7d27d966c4f3e96f0f5fc897189dd45a6f9474d0 not found: ID does not exist" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.554319 4631 scope.go:117] "RemoveContainer" containerID="d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91" Nov 29 04:58:42 crc kubenswrapper[4631]: E1129 04:58:42.554617 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91\": container with ID starting with d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91 not found: ID does not exist" containerID="d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91" Nov 29 04:58:42 crc kubenswrapper[4631]: I1129 04:58:42.554638 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91"} err="failed to get container status \"d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91\": rpc error: code = NotFound desc = could not find container \"d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91\": container with ID starting with d19d6319d2dec5b43cebd344077727e20f1c0715ef564cbfc4d4f943c99e9d91 not found: ID does not exist" Nov 29 04:58:43 crc kubenswrapper[4631]: I1129 04:58:43.231810 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d014ca7f-372e-4117-82b4-676b4368a1d7" path="/var/lib/kubelet/pods/d014ca7f-372e-4117-82b4-676b4368a1d7/volumes" Nov 29 04:58:50 crc kubenswrapper[4631]: I1129 04:58:50.715966 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:58:50 crc kubenswrapper[4631]: I1129 04:58:50.716853 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:59:20 crc kubenswrapper[4631]: I1129 04:59:20.715925 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 04:59:20 crc kubenswrapper[4631]: I1129 04:59:20.716616 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:59:50 crc kubenswrapper[4631]: I1129 04:59:50.716364 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 
04:59:50 crc kubenswrapper[4631]: I1129 04:59:50.717013 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 04:59:50 crc kubenswrapper[4631]: I1129 04:59:50.717084 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 04:59:50 crc kubenswrapper[4631]: I1129 04:59:50.718170 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b7fb9a7e18ea2e496f485c0bfb951b874a500fca1fbcb82c5e3f2b282902676a"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 04:59:50 crc kubenswrapper[4631]: I1129 04:59:50.718281 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://b7fb9a7e18ea2e496f485c0bfb951b874a500fca1fbcb82c5e3f2b282902676a" gracePeriod=600 Nov 29 04:59:51 crc kubenswrapper[4631]: I1129 04:59:51.260300 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="b7fb9a7e18ea2e496f485c0bfb951b874a500fca1fbcb82c5e3f2b282902676a" exitCode=0 Nov 29 04:59:51 crc kubenswrapper[4631]: I1129 04:59:51.260361 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"b7fb9a7e18ea2e496f485c0bfb951b874a500fca1fbcb82c5e3f2b282902676a"} Nov 29 04:59:51 crc kubenswrapper[4631]: I1129 04:59:51.260827 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"} Nov 29 04:59:51 crc kubenswrapper[4631]: I1129 04:59:51.260896 4631 scope.go:117] "RemoveContainer" containerID="b4b2b5b5963a249180d8e660cd2ab475d17e9d53f4954d0ce5e993e3afb6974f" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.178724 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn"] Nov 29 05:00:00 crc kubenswrapper[4631]: E1129 05:00:00.179857 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerName="extract-content" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.179879 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerName="extract-content" Nov 29 05:00:00 crc kubenswrapper[4631]: E1129 05:00:00.179920 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerName="registry-server" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.179931 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerName="registry-server" Nov 29 05:00:00 crc kubenswrapper[4631]: 
E1129 05:00:00.179955 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerName="extract-utilities" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.179967 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerName="extract-utilities" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.180245 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="d014ca7f-372e-4117-82b4-676b4368a1d7" containerName="registry-server" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.181246 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.184932 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.185098 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.224378 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn"] Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.248023 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d07fb720-bdcc-440c-a503-dce590236b78-config-volume\") pod \"collect-profiles-29406540-fcxpn\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.248085 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d07fb720-bdcc-440c-a503-dce590236b78-secret-volume\") pod \"collect-profiles-29406540-fcxpn\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.248123 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz6r7\" (UniqueName: \"kubernetes.io/projected/d07fb720-bdcc-440c-a503-dce590236b78-kube-api-access-hz6r7\") pod \"collect-profiles-29406540-fcxpn\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.349901 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d07fb720-bdcc-440c-a503-dce590236b78-config-volume\") pod \"collect-profiles-29406540-fcxpn\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.349946 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d07fb720-bdcc-440c-a503-dce590236b78-secret-volume\") pod \"collect-profiles-29406540-fcxpn\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 
05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.349974 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz6r7\" (UniqueName: \"kubernetes.io/projected/d07fb720-bdcc-440c-a503-dce590236b78-kube-api-access-hz6r7\") pod \"collect-profiles-29406540-fcxpn\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.351211 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d07fb720-bdcc-440c-a503-dce590236b78-config-volume\") pod \"collect-profiles-29406540-fcxpn\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.362495 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d07fb720-bdcc-440c-a503-dce590236b78-secret-volume\") pod \"collect-profiles-29406540-fcxpn\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.372192 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz6r7\" (UniqueName: \"kubernetes.io/projected/d07fb720-bdcc-440c-a503-dce590236b78-kube-api-access-hz6r7\") pod \"collect-profiles-29406540-fcxpn\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:00 crc kubenswrapper[4631]: I1129 05:00:00.558856 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:01 crc kubenswrapper[4631]: I1129 05:00:01.043081 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn"] Nov 29 05:00:01 crc kubenswrapper[4631]: I1129 05:00:01.376252 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" event={"ID":"d07fb720-bdcc-440c-a503-dce590236b78","Type":"ContainerStarted","Data":"5f09fe89cf54928c7bdeef60f73efed81de672b934b9174c4986d48d8147afb7"} Nov 29 05:00:01 crc kubenswrapper[4631]: I1129 05:00:01.376813 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" event={"ID":"d07fb720-bdcc-440c-a503-dce590236b78","Type":"ContainerStarted","Data":"f82fcb3695d0e4691a7bc4b88b30252e4d3a159842e19728b70706ed542e3fa0"} Nov 29 05:00:01 crc kubenswrapper[4631]: I1129 05:00:01.398797 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" podStartSLOduration=1.398782949 podStartE2EDuration="1.398782949s" podCreationTimestamp="2025-11-29 05:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 05:00:01.390558448 +0000 UTC m=+2928.455061952" watchObservedRunningTime="2025-11-29 05:00:01.398782949 +0000 UTC m=+2928.463286463" Nov 29 05:00:02 crc kubenswrapper[4631]: I1129 05:00:02.394008 4631 generic.go:334] "Generic (PLEG): container finished" podID="d07fb720-bdcc-440c-a503-dce590236b78" containerID="5f09fe89cf54928c7bdeef60f73efed81de672b934b9174c4986d48d8147afb7" exitCode=0 Nov 29 05:00:02 crc kubenswrapper[4631]: I1129 05:00:02.394439 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" event={"ID":"d07fb720-bdcc-440c-a503-dce590236b78","Type":"ContainerDied","Data":"5f09fe89cf54928c7bdeef60f73efed81de672b934b9174c4986d48d8147afb7"} Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.701188 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.816908 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d07fb720-bdcc-440c-a503-dce590236b78-secret-volume\") pod \"d07fb720-bdcc-440c-a503-dce590236b78\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.816959 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz6r7\" (UniqueName: \"kubernetes.io/projected/d07fb720-bdcc-440c-a503-dce590236b78-kube-api-access-hz6r7\") pod \"d07fb720-bdcc-440c-a503-dce590236b78\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.817073 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d07fb720-bdcc-440c-a503-dce590236b78-config-volume\") pod \"d07fb720-bdcc-440c-a503-dce590236b78\" (UID: \"d07fb720-bdcc-440c-a503-dce590236b78\") " Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.818212 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d07fb720-bdcc-440c-a503-dce590236b78-config-volume" (OuterVolumeSpecName: "config-volume") pod "d07fb720-bdcc-440c-a503-dce590236b78" (UID: "d07fb720-bdcc-440c-a503-dce590236b78"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.828490 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d07fb720-bdcc-440c-a503-dce590236b78-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d07fb720-bdcc-440c-a503-dce590236b78" (UID: "d07fb720-bdcc-440c-a503-dce590236b78"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.828544 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d07fb720-bdcc-440c-a503-dce590236b78-kube-api-access-hz6r7" (OuterVolumeSpecName: "kube-api-access-hz6r7") pod "d07fb720-bdcc-440c-a503-dce590236b78" (UID: "d07fb720-bdcc-440c-a503-dce590236b78"). InnerVolumeSpecName "kube-api-access-hz6r7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.920197 4631 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d07fb720-bdcc-440c-a503-dce590236b78-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.920542 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz6r7\" (UniqueName: \"kubernetes.io/projected/d07fb720-bdcc-440c-a503-dce590236b78-kube-api-access-hz6r7\") on node \"crc\" DevicePath \"\"" Nov 29 05:00:03 crc kubenswrapper[4631]: I1129 05:00:03.920677 4631 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d07fb720-bdcc-440c-a503-dce590236b78-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 05:00:04 crc kubenswrapper[4631]: I1129 05:00:04.419051 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" event={"ID":"d07fb720-bdcc-440c-a503-dce590236b78","Type":"ContainerDied","Data":"f82fcb3695d0e4691a7bc4b88b30252e4d3a159842e19728b70706ed542e3fa0"} Nov 29 05:00:04 crc kubenswrapper[4631]: I1129 05:00:04.419111 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f82fcb3695d0e4691a7bc4b88b30252e4d3a159842e19728b70706ed542e3fa0" Nov 29 05:00:04 crc kubenswrapper[4631]: I1129 05:00:04.419221 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406540-fcxpn" Nov 29 05:00:04 crc kubenswrapper[4631]: I1129 05:00:04.515286 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"] Nov 29 05:00:04 crc kubenswrapper[4631]: I1129 05:00:04.524401 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406495-58zvs"] Nov 29 05:00:05 crc kubenswrapper[4631]: I1129 05:00:05.229379 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="083f2d0e-7023-4525-bf09-65b19f3b60b0" path="/var/lib/kubelet/pods/083f2d0e-7023-4525-bf09-65b19f3b60b0/volumes" Nov 29 05:00:43 crc kubenswrapper[4631]: I1129 05:00:43.894259 4631 generic.go:334] "Generic (PLEG): container finished" podID="38877ce3-8e06-44be-9be6-4abb374c32fa" containerID="a6cc199cc0acc53de8cb0b3a2061a1aad58a10cb774cc2068721fde99671a60c" exitCode=0 Nov 29 05:00:43 crc kubenswrapper[4631]: I1129 05:00:43.894349 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" event={"ID":"38877ce3-8e06-44be-9be6-4abb374c32fa","Type":"ContainerDied","Data":"a6cc199cc0acc53de8cb0b3a2061a1aad58a10cb774cc2068721fde99671a60c"} Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.320911 4631 util.go:48] "No ready sandbox for pod can be found. 
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.402172 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-2\") pod \"38877ce3-8e06-44be-9be6-4abb374c32fa\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") "
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.402363 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzqlq\" (UniqueName: \"kubernetes.io/projected/38877ce3-8e06-44be-9be6-4abb374c32fa-kube-api-access-nzqlq\") pod \"38877ce3-8e06-44be-9be6-4abb374c32fa\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") "
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.402484 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-inventory\") pod \"38877ce3-8e06-44be-9be6-4abb374c32fa\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") "
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.402551 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-telemetry-combined-ca-bundle\") pod \"38877ce3-8e06-44be-9be6-4abb374c32fa\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") "
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.402608 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ssh-key\") pod \"38877ce3-8e06-44be-9be6-4abb374c32fa\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") "
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.402705 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-0\") pod \"38877ce3-8e06-44be-9be6-4abb374c32fa\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") "
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.402738 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-1\") pod \"38877ce3-8e06-44be-9be6-4abb374c32fa\" (UID: \"38877ce3-8e06-44be-9be6-4abb374c32fa\") "
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.428661 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "38877ce3-8e06-44be-9be6-4abb374c32fa" (UID: "38877ce3-8e06-44be-9be6-4abb374c32fa"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.428753 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38877ce3-8e06-44be-9be6-4abb374c32fa-kube-api-access-nzqlq" (OuterVolumeSpecName: "kube-api-access-nzqlq") pod "38877ce3-8e06-44be-9be6-4abb374c32fa" (UID: "38877ce3-8e06-44be-9be6-4abb374c32fa"). InnerVolumeSpecName "kube-api-access-nzqlq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.432754 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "38877ce3-8e06-44be-9be6-4abb374c32fa" (UID: "38877ce3-8e06-44be-9be6-4abb374c32fa"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.433195 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "38877ce3-8e06-44be-9be6-4abb374c32fa" (UID: "38877ce3-8e06-44be-9be6-4abb374c32fa"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.439828 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "38877ce3-8e06-44be-9be6-4abb374c32fa" (UID: "38877ce3-8e06-44be-9be6-4abb374c32fa"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.441673 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-inventory" (OuterVolumeSpecName: "inventory") pod "38877ce3-8e06-44be-9be6-4abb374c32fa" (UID: "38877ce3-8e06-44be-9be6-4abb374c32fa"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.445078 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "38877ce3-8e06-44be-9be6-4abb374c32fa" (UID: "38877ce3-8e06-44be-9be6-4abb374c32fa"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.505105 4631 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.505148 4631 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.505165 4631 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.505181 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzqlq\" (UniqueName: \"kubernetes.io/projected/38877ce3-8e06-44be-9be6-4abb374c32fa-kube-api-access-nzqlq\") on node \"crc\" DevicePath \"\""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.505197 4631 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-inventory\") on node \"crc\" DevicePath \"\""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.505208 4631 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.505221 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/38877ce3-8e06-44be-9be6-4abb374c32fa-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.915017 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc" event={"ID":"38877ce3-8e06-44be-9be6-4abb374c32fa","Type":"ContainerDied","Data":"9a3a9ede66973a4f481da7c5d3f9c16c518d82a35b6144b5d5e31052c33641bf"}
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.915451 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a3a9ede66973a4f481da7c5d3f9c16c518d82a35b6144b5d5e31052c33641bf"
Nov 29 05:00:45 crc kubenswrapper[4631]: I1129 05:00:45.915099 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.158800 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29406541-z5vp6"]
Nov 29 05:01:00 crc kubenswrapper[4631]: E1129 05:01:00.160047 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d07fb720-bdcc-440c-a503-dce590236b78" containerName="collect-profiles"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.160073 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="d07fb720-bdcc-440c-a503-dce590236b78" containerName="collect-profiles"
Nov 29 05:01:00 crc kubenswrapper[4631]: E1129 05:01:00.160105 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38877ce3-8e06-44be-9be6-4abb374c32fa" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.160119 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="38877ce3-8e06-44be-9be6-4abb374c32fa" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.160439 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="38877ce3-8e06-44be-9be6-4abb374c32fa" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.160474 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="d07fb720-bdcc-440c-a503-dce590236b78" containerName="collect-profiles"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.161681 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.172400 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29406541-z5vp6"]
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.349652 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-combined-ca-bundle\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.349729 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xf8h\" (UniqueName: \"kubernetes.io/projected/1f61da44-0694-4d79-b156-8ed5d0358a3f-kube-api-access-5xf8h\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.350556 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-config-data\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.350625 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-fernet-keys\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.452587 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-combined-ca-bundle\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.452647 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xf8h\" (UniqueName: \"kubernetes.io/projected/1f61da44-0694-4d79-b156-8ed5d0358a3f-kube-api-access-5xf8h\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.452741 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-config-data\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.452776 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-fernet-keys\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.459251 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-config-data\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.460024 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-fernet-keys\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.465301 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-combined-ca-bundle\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.479164 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xf8h\" (UniqueName: \"kubernetes.io/projected/1f61da44-0694-4d79-b156-8ed5d0358a3f-kube-api-access-5xf8h\") pod \"keystone-cron-29406541-z5vp6\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") " pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:00 crc kubenswrapper[4631]: I1129 05:01:00.520430 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:01 crc kubenswrapper[4631]: I1129 05:01:01.006560 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29406541-z5vp6"]
Nov 29 05:01:01 crc kubenswrapper[4631]: W1129 05:01:01.050526 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f61da44_0694_4d79_b156_8ed5d0358a3f.slice/crio-c10645659c4e1fe32c9c31136ad245be4a5f6b151201de3d738ad1af8fc5afd5 WatchSource:0}: Error finding container c10645659c4e1fe32c9c31136ad245be4a5f6b151201de3d738ad1af8fc5afd5: Status 404 returned error can't find the container with id c10645659c4e1fe32c9c31136ad245be4a5f6b151201de3d738ad1af8fc5afd5
Nov 29 05:01:01 crc kubenswrapper[4631]: I1129 05:01:01.103392 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29406541-z5vp6" event={"ID":"1f61da44-0694-4d79-b156-8ed5d0358a3f","Type":"ContainerStarted","Data":"c10645659c4e1fe32c9c31136ad245be4a5f6b151201de3d738ad1af8fc5afd5"}
Nov 29 05:01:02 crc kubenswrapper[4631]: I1129 05:01:02.116225 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29406541-z5vp6" event={"ID":"1f61da44-0694-4d79-b156-8ed5d0358a3f","Type":"ContainerStarted","Data":"7c3303633c195c86282f2d7ed018a63f06cb4fdbacbce2f4a51f18a8c408a5ea"}
Nov 29 05:01:02 crc kubenswrapper[4631]: I1129 05:01:02.139705 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29406541-z5vp6" podStartSLOduration=2.139687922 podStartE2EDuration="2.139687922s" podCreationTimestamp="2025-11-29 05:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 05:01:02.136847942 +0000 UTC m=+2989.201351476" watchObservedRunningTime="2025-11-29 05:01:02.139687922 +0000 UTC m=+2989.204191426"
Nov 29 05:01:03 crc kubenswrapper[4631]: I1129 05:01:03.389478 4631 scope.go:117] "RemoveContainer" containerID="04a16341ab580bcac91e381b4a7fd05a101cc56597e8195930e2b375c6eba1f1"
Nov 29 05:01:04 crc kubenswrapper[4631]: I1129 05:01:04.137301 4631 generic.go:334] "Generic (PLEG): container finished" podID="1f61da44-0694-4d79-b156-8ed5d0358a3f" containerID="7c3303633c195c86282f2d7ed018a63f06cb4fdbacbce2f4a51f18a8c408a5ea" exitCode=0
Nov 29 05:01:04 crc kubenswrapper[4631]: I1129 05:01:04.137359 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29406541-z5vp6" event={"ID":"1f61da44-0694-4d79-b156-8ed5d0358a3f","Type":"ContainerDied","Data":"7c3303633c195c86282f2d7ed018a63f06cb4fdbacbce2f4a51f18a8c408a5ea"}
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.472325 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.656388 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-fernet-keys\") pod \"1f61da44-0694-4d79-b156-8ed5d0358a3f\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") "
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.656552 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-config-data\") pod \"1f61da44-0694-4d79-b156-8ed5d0358a3f\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") "
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.656582 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-combined-ca-bundle\") pod \"1f61da44-0694-4d79-b156-8ed5d0358a3f\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") "
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.656618 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xf8h\" (UniqueName: \"kubernetes.io/projected/1f61da44-0694-4d79-b156-8ed5d0358a3f-kube-api-access-5xf8h\") pod \"1f61da44-0694-4d79-b156-8ed5d0358a3f\" (UID: \"1f61da44-0694-4d79-b156-8ed5d0358a3f\") "
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.664555 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1f61da44-0694-4d79-b156-8ed5d0358a3f" (UID: "1f61da44-0694-4d79-b156-8ed5d0358a3f"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.684595 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f61da44-0694-4d79-b156-8ed5d0358a3f-kube-api-access-5xf8h" (OuterVolumeSpecName: "kube-api-access-5xf8h") pod "1f61da44-0694-4d79-b156-8ed5d0358a3f" (UID: "1f61da44-0694-4d79-b156-8ed5d0358a3f"). InnerVolumeSpecName "kube-api-access-5xf8h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.717406 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1f61da44-0694-4d79-b156-8ed5d0358a3f" (UID: "1f61da44-0694-4d79-b156-8ed5d0358a3f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.759571 4631 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.759601 4631 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.759613 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xf8h\" (UniqueName: \"kubernetes.io/projected/1f61da44-0694-4d79-b156-8ed5d0358a3f-kube-api-access-5xf8h\") on node \"crc\" DevicePath \"\""
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.826522 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-config-data" (OuterVolumeSpecName: "config-data") pod "1f61da44-0694-4d79-b156-8ed5d0358a3f" (UID: "1f61da44-0694-4d79-b156-8ed5d0358a3f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 29 05:01:05 crc kubenswrapper[4631]: I1129 05:01:05.861386 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f61da44-0694-4d79-b156-8ed5d0358a3f-config-data\") on node \"crc\" DevicePath \"\""
Nov 29 05:01:06 crc kubenswrapper[4631]: I1129 05:01:06.159665 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29406541-z5vp6" event={"ID":"1f61da44-0694-4d79-b156-8ed5d0358a3f","Type":"ContainerDied","Data":"c10645659c4e1fe32c9c31136ad245be4a5f6b151201de3d738ad1af8fc5afd5"}
Nov 29 05:01:06 crc kubenswrapper[4631]: I1129 05:01:06.159702 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c10645659c4e1fe32c9c31136ad245be4a5f6b151201de3d738ad1af8fc5afd5"
Nov 29 05:01:06 crc kubenswrapper[4631]: I1129 05:01:06.159748 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29406541-z5vp6"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.104547 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 29 05:01:43 crc kubenswrapper[4631]: E1129 05:01:43.106054 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f61da44-0694-4d79-b156-8ed5d0358a3f" containerName="keystone-cron"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.106082 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f61da44-0694-4d79-b156-8ed5d0358a3f" containerName="keystone-cron"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.106505 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f61da44-0694-4d79-b156-8ed5d0358a3f" containerName="keystone-cron"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.107900 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.113635 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.114067 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.115992 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.117205 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5sdwg"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.119609 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.241651 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.241773 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.241808 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.241895 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.241921 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb8gh\" (UniqueName: \"kubernetes.io/projected/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-kube-api-access-zb8gh\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.241956 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.242031 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.242091 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.242180 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-config-data\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344267 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344364 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344404 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344454 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344480 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb8gh\" (UniqueName: \"kubernetes.io/projected/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-kube-api-access-zb8gh\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344510 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344572 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344628 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344687 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-config-data\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.344865 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.345744 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.346265 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.346583 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.347087 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-config-data\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.352144 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.354259 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.364365 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.370000 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb8gh\" (UniqueName: \"kubernetes.io/projected/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-kube-api-access-zb8gh\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.392666 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.453979 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.913689 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 29 05:01:43 crc kubenswrapper[4631]: I1129 05:01:43.925922 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 29 05:01:44 crc kubenswrapper[4631]: I1129 05:01:44.640456 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb","Type":"ContainerStarted","Data":"713585da4d2ee4e19360ac3d46e80374de27f303939c35da7ffce41730df8241"}
Nov 29 05:02:17 crc kubenswrapper[4631]: E1129 05:02:17.688453 4631 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified"
Nov 29 05:02:17 crc kubenswrapper[4631]: E1129 05:02:17.690080 4631 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zb8gh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 29 05:02:17 crc kubenswrapper[4631]: E1129 05:02:17.691503 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"
Nov 29 05:02:18 crc kubenswrapper[4631]: E1129 05:02:18.023859 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"
Nov 29 05:02:20 crc kubenswrapper[4631]: I1129 05:02:20.716062 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 05:02:20 crc kubenswrapper[4631]: I1129 05:02:20.716476 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 05:02:32 crc kubenswrapper[4631]: I1129 05:02:32.892503 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 29 05:02:34 crc kubenswrapper[4631]: I1129 05:02:34.221619 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb","Type":"ContainerStarted","Data":"6713e9d977951333aebf32a457c8effa3be72d0ab1ddb06349e89a2ed0b438d6"}
Nov 29 05:02:34 crc kubenswrapper[4631]: I1129 05:02:34.244111 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.280589967 podStartE2EDuration="52.244091741s" podCreationTimestamp="2025-11-29 05:01:42 +0000 UTC" firstStartedPulling="2025-11-29 05:01:43.925558476 +0000 UTC m=+3030.990062000" lastFinishedPulling="2025-11-29 05:02:32.88906025 +0000 UTC m=+3079.953563774" observedRunningTime="2025-11-29 05:02:34.241927348 +0000 UTC m=+3081.306430942" watchObservedRunningTime="2025-11-29 05:02:34.244091741 +0000 UTC m=+3081.308595265"
Nov 29 05:02:50 crc kubenswrapper[4631]: I1129 05:02:50.716187 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 05:02:50 crc kubenswrapper[4631]: I1129 05:02:50.716998 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 05:03:20 crc kubenswrapper[4631]: I1129 05:03:20.716579 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 29 05:03:20 crc kubenswrapper[4631]: I1129 05:03:20.717091 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 29 05:03:20 crc kubenswrapper[4631]: I1129 05:03:20.717140 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd"
Nov 29 05:03:20 crc kubenswrapper[4631]: I1129 05:03:20.717900 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 29 05:03:20 crc kubenswrapper[4631]: I1129 05:03:20.717958 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" gracePeriod=600
Nov 29 05:03:20 crc kubenswrapper[4631]: E1129 05:03:20.847672 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:03:21 crc kubenswrapper[4631]: I1129 05:03:21.758603 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" exitCode=0
Nov 29 05:03:21 crc kubenswrapper[4631]: I1129 05:03:21.758772 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"}
Nov 29 05:03:21 crc kubenswrapper[4631]: I1129 05:03:21.758920 4631 scope.go:117] "RemoveContainer" containerID="b7fb9a7e18ea2e496f485c0bfb951b874a500fca1fbcb82c5e3f2b282902676a"
Nov 29 05:03:21 crc kubenswrapper[4631]: I1129 05:03:21.759592 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:03:21 crc kubenswrapper[4631]: E1129 05:03:21.760050 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:03:36 crc kubenswrapper[4631]: I1129 05:03:36.216216 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:03:36 crc kubenswrapper[4631]: E1129 05:03:36.217086 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:03:51 crc kubenswrapper[4631]: I1129 05:03:51.217091 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:03:51 crc kubenswrapper[4631]: E1129 05:03:51.217861 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:04:02 crc kubenswrapper[4631]: I1129 05:04:02.217032 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:04:02 crc kubenswrapper[4631]: E1129 05:04:02.217869 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:04:11 crc kubenswrapper[4631]: I1129 05:04:11.866296 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cgb56"]
Nov 29 05:04:11 crc kubenswrapper[4631]: I1129 05:04:11.868516 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:11 crc kubenswrapper[4631]: I1129 05:04:11.879487 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cgb56"]
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.009364 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4f7f\" (UniqueName: \"kubernetes.io/projected/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-kube-api-access-m4f7f\") pod \"certified-operators-cgb56\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") " pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.009669 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-utilities\") pod \"certified-operators-cgb56\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") " pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.010452 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-catalog-content\") pod \"certified-operators-cgb56\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") " pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.112485 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4f7f\" (UniqueName: \"kubernetes.io/projected/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-kube-api-access-m4f7f\") pod \"certified-operators-cgb56\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") " pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.112531 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-utilities\") pod \"certified-operators-cgb56\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") " pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.112580 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-catalog-content\") pod \"certified-operators-cgb56\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") " pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.113000 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-catalog-content\") pod \"certified-operators-cgb56\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") " pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.113153 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-utilities\") pod \"certified-operators-cgb56\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") " pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.136636 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4f7f\" (UniqueName: \"kubernetes.io/projected/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-kube-api-access-m4f7f\") pod \"certified-operators-cgb56\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") " pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.185773 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:12 crc kubenswrapper[4631]: I1129 05:04:12.723017 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cgb56"]
Nov 29 05:04:13 crc kubenswrapper[4631]: I1129 05:04:13.226447 4631 generic.go:334] "Generic (PLEG): container finished" podID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerID="36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f" exitCode=0
Nov 29 05:04:13 crc kubenswrapper[4631]: I1129 05:04:13.233434 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgb56" event={"ID":"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf","Type":"ContainerDied","Data":"36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f"}
Nov 29 05:04:13 crc kubenswrapper[4631]: I1129 05:04:13.233484 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgb56" event={"ID":"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf","Type":"ContainerStarted","Data":"8a6100359b04d315b5ec99b56c6da8a6e23191ec0394f5abf511bfe2e30bfdab"}
Nov 29 05:04:15 crc kubenswrapper[4631]: I1129 05:04:15.256117 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgb56" event={"ID":"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf","Type":"ContainerStarted","Data":"f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d"}
Nov 29 05:04:16 crc kubenswrapper[4631]: I1129 05:04:16.216944 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:04:16 crc kubenswrapper[4631]: E1129 05:04:16.217749 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:04:16 crc kubenswrapper[4631]: I1129 05:04:16.266693 4631 generic.go:334] "Generic (PLEG): container finished" podID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerID="f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d" exitCode=0
Nov 29 05:04:16 crc kubenswrapper[4631]: I1129 05:04:16.266740 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgb56" event={"ID":"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf","Type":"ContainerDied","Data":"f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d"}
Nov 29 05:04:17 crc kubenswrapper[4631]: I1129 05:04:17.281583 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgb56" event={"ID":"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf","Type":"ContainerStarted","Data":"b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490"}
Nov 29 05:04:17 crc kubenswrapper[4631]: I1129 05:04:17.303311 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cgb56" podStartSLOduration=2.834376862 podStartE2EDuration="6.303293597s" podCreationTimestamp="2025-11-29 05:04:11 +0000 UTC" firstStartedPulling="2025-11-29 05:04:13.233945891 +0000 UTC m=+3180.298449395" lastFinishedPulling="2025-11-29 05:04:16.702862606 +0000 UTC m=+3183.767366130" observedRunningTime="2025-11-29 05:04:17.303035321 +0000 UTC m=+3184.367538835" watchObservedRunningTime="2025-11-29 05:04:17.303293597 +0000 UTC m=+3184.367797111"
Nov 29 05:04:22 crc kubenswrapper[4631]: I1129 05:04:22.186624 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:22 crc kubenswrapper[4631]: I1129 05:04:22.188002 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:22 crc kubenswrapper[4631]: I1129 05:04:22.248115 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:22 crc kubenswrapper[4631]: I1129 05:04:22.376519 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:22 crc kubenswrapper[4631]: I1129 05:04:22.487233 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cgb56"]
Nov 29 05:04:24 crc kubenswrapper[4631]: I1129 05:04:24.343645 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cgb56" podUID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerName="registry-server" containerID="cri-o://b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490" gracePeriod=2
Nov 29 05:04:24 crc kubenswrapper[4631]: E1129 05:04:24.486715 4631 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcceecd14_0c8c_45fa_bd1a_110d2dd9fccf.slice/crio-conmon-b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490.scope\": RecentStats: unable to find data in memory cache]"
Nov 29 05:04:24 crc kubenswrapper[4631]: I1129 05:04:24.972467 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cgb56"
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.030508 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-utilities\") pod \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") "
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.030694 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-catalog-content\") pod \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") "
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.030721 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4f7f\" (UniqueName: \"kubernetes.io/projected/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-kube-api-access-m4f7f\") pod \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\" (UID: \"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf\") "
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.031666 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-utilities" (OuterVolumeSpecName: "utilities") pod "cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" (UID: "cceecd14-0c8c-45fa-bd1a-110d2dd9fccf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.036755 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-kube-api-access-m4f7f" (OuterVolumeSpecName: "kube-api-access-m4f7f") pod "cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" (UID: "cceecd14-0c8c-45fa-bd1a-110d2dd9fccf"). InnerVolumeSpecName "kube-api-access-m4f7f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.108291 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" (UID: "cceecd14-0c8c-45fa-bd1a-110d2dd9fccf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.131917 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.131941 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4f7f\" (UniqueName: \"kubernetes.io/projected/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-kube-api-access-m4f7f\") on node \"crc\" DevicePath \"\""
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.131951 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.357309 4631 generic.go:334] "Generic (PLEG): container finished" podID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerID="b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490" exitCode=0
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.357373 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgb56" event={"ID":"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf","Type":"ContainerDied","Data":"b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490"}
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.357641 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cgb56" event={"ID":"cceecd14-0c8c-45fa-bd1a-110d2dd9fccf","Type":"ContainerDied","Data":"8a6100359b04d315b5ec99b56c6da8a6e23191ec0394f5abf511bfe2e30bfdab"}
Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.357396 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cgb56" Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.357668 4631 scope.go:117] "RemoveContainer" containerID="b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490" Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.378284 4631 scope.go:117] "RemoveContainer" containerID="f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d" Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.380254 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cgb56"] Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.388097 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cgb56"] Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.400267 4631 scope.go:117] "RemoveContainer" containerID="36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f" Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.441153 4631 scope.go:117] "RemoveContainer" containerID="b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490" Nov 29 05:04:25 crc kubenswrapper[4631]: E1129 05:04:25.442760 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490\": container with ID starting with b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490 not found: ID does not exist" containerID="b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490" Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.442811 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490"} err="failed to get container status \"b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490\": rpc error: code = NotFound desc = could not find container \"b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490\": container with ID starting with b161e65915fbcbaaa81d23de678d5a90880d883508d97ee71544228ecca71490 not found: ID does not exist" Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.442838 4631 scope.go:117] "RemoveContainer" containerID="f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d" Nov 29 05:04:25 crc kubenswrapper[4631]: E1129 05:04:25.443508 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d\": container with ID starting with f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d not found: ID does not exist" containerID="f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d" Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.443533 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d"} err="failed to get container status \"f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d\": rpc error: code = NotFound desc = could not find container \"f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d\": container with ID starting with f2911369d0a1fb5582a842d00f115fd94cec2871cda4fa7cfd8aa3e4cd19e11d not found: ID does not exist" Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.443546 4631 scope.go:117] "RemoveContainer" 
containerID="36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f" Nov 29 05:04:25 crc kubenswrapper[4631]: E1129 05:04:25.444442 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f\": container with ID starting with 36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f not found: ID does not exist" containerID="36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f" Nov 29 05:04:25 crc kubenswrapper[4631]: I1129 05:04:25.444469 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f"} err="failed to get container status \"36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f\": rpc error: code = NotFound desc = could not find container \"36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f\": container with ID starting with 36fe54adf05452bbc00223a0f57795353953a023156a30647bb3a12b2d9b133f not found: ID does not exist" Nov 29 05:04:27 crc kubenswrapper[4631]: I1129 05:04:27.226521 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" path="/var/lib/kubelet/pods/cceecd14-0c8c-45fa-bd1a-110d2dd9fccf/volumes" Nov 29 05:04:30 crc kubenswrapper[4631]: I1129 05:04:30.216998 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:04:30 crc kubenswrapper[4631]: E1129 05:04:30.217423 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.216150 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:04:45 crc kubenswrapper[4631]: E1129 05:04:45.216857 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.395291 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v4mmd"] Nov 29 05:04:45 crc kubenswrapper[4631]: E1129 05:04:45.395902 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerName="extract-utilities" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.395919 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerName="extract-utilities" Nov 29 05:04:45 crc kubenswrapper[4631]: E1129 05:04:45.395932 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerName="extract-content" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 
05:04:45.395939 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerName="extract-content" Nov 29 05:04:45 crc kubenswrapper[4631]: E1129 05:04:45.395960 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerName="registry-server" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.395967 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerName="registry-server" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.396140 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cceecd14-0c8c-45fa-bd1a-110d2dd9fccf" containerName="registry-server" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.397771 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.419350 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v4mmd"] Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.502963 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxjms\" (UniqueName: \"kubernetes.io/projected/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-kube-api-access-mxjms\") pod \"redhat-operators-v4mmd\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.503096 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-catalog-content\") pod \"redhat-operators-v4mmd\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.503300 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-utilities\") pod \"redhat-operators-v4mmd\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.605157 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxjms\" (UniqueName: \"kubernetes.io/projected/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-kube-api-access-mxjms\") pod \"redhat-operators-v4mmd\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.605236 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-catalog-content\") pod \"redhat-operators-v4mmd\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.605284 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-utilities\") pod \"redhat-operators-v4mmd\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 
05:04:45.605782 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-utilities\") pod \"redhat-operators-v4mmd\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.605842 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-catalog-content\") pod \"redhat-operators-v4mmd\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.626001 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxjms\" (UniqueName: \"kubernetes.io/projected/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-kube-api-access-mxjms\") pod \"redhat-operators-v4mmd\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:45 crc kubenswrapper[4631]: I1129 05:04:45.717175 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:46 crc kubenswrapper[4631]: I1129 05:04:46.197208 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v4mmd"] Nov 29 05:04:46 crc kubenswrapper[4631]: I1129 05:04:46.565637 4631 generic.go:334] "Generic (PLEG): container finished" podID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerID="88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9" exitCode=0 Nov 29 05:04:46 crc kubenswrapper[4631]: I1129 05:04:46.565701 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4mmd" event={"ID":"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0","Type":"ContainerDied","Data":"88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9"} Nov 29 05:04:46 crc kubenswrapper[4631]: I1129 05:04:46.565991 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4mmd" event={"ID":"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0","Type":"ContainerStarted","Data":"8c231bd6158f0300ec711def7c9412678ffcd30e6c01d96d3ad164358d914078"} Nov 29 05:04:47 crc kubenswrapper[4631]: I1129 05:04:47.579245 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4mmd" event={"ID":"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0","Type":"ContainerStarted","Data":"e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483"} Nov 29 05:04:52 crc kubenswrapper[4631]: I1129 05:04:52.660997 4631 generic.go:334] "Generic (PLEG): container finished" podID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerID="e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483" exitCode=0 Nov 29 05:04:52 crc kubenswrapper[4631]: I1129 05:04:52.661901 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4mmd" event={"ID":"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0","Type":"ContainerDied","Data":"e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483"} Nov 29 05:04:53 crc kubenswrapper[4631]: I1129 05:04:53.673323 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4mmd" 
event={"ID":"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0","Type":"ContainerStarted","Data":"5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5"} Nov 29 05:04:53 crc kubenswrapper[4631]: I1129 05:04:53.700620 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v4mmd" podStartSLOduration=1.9358466380000001 podStartE2EDuration="8.700598607s" podCreationTimestamp="2025-11-29 05:04:45 +0000 UTC" firstStartedPulling="2025-11-29 05:04:46.567433838 +0000 UTC m=+3213.631937352" lastFinishedPulling="2025-11-29 05:04:53.332185797 +0000 UTC m=+3220.396689321" observedRunningTime="2025-11-29 05:04:53.695739777 +0000 UTC m=+3220.760243291" watchObservedRunningTime="2025-11-29 05:04:53.700598607 +0000 UTC m=+3220.765102141" Nov 29 05:04:55 crc kubenswrapper[4631]: I1129 05:04:55.717852 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:55 crc kubenswrapper[4631]: I1129 05:04:55.719378 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:04:56 crc kubenswrapper[4631]: I1129 05:04:56.776318 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-v4mmd" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerName="registry-server" probeResult="failure" output=< Nov 29 05:04:56 crc kubenswrapper[4631]: timeout: failed to connect service ":50051" within 1s Nov 29 05:04:56 crc kubenswrapper[4631]: > Nov 29 05:05:00 crc kubenswrapper[4631]: I1129 05:05:00.216995 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:05:00 crc kubenswrapper[4631]: E1129 05:05:00.217789 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:05:05 crc kubenswrapper[4631]: I1129 05:05:05.784959 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:05:05 crc kubenswrapper[4631]: I1129 05:05:05.843250 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:05:06 crc kubenswrapper[4631]: I1129 05:05:06.024659 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v4mmd"] Nov 29 05:05:07 crc kubenswrapper[4631]: I1129 05:05:07.796278 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v4mmd" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerName="registry-server" containerID="cri-o://5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5" gracePeriod=2 Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.385157 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.496190 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-utilities\") pod \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.496247 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-catalog-content\") pod \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.497259 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-utilities" (OuterVolumeSpecName: "utilities") pod "ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" (UID: "ec99aab2-ff81-4a20-ba8d-0a0e80f119b0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.508033 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mxjms\" (UniqueName: \"kubernetes.io/projected/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-kube-api-access-mxjms\") pod \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\" (UID: \"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0\") " Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.509106 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.521565 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-kube-api-access-mxjms" (OuterVolumeSpecName: "kube-api-access-mxjms") pod "ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" (UID: "ec99aab2-ff81-4a20-ba8d-0a0e80f119b0"). InnerVolumeSpecName "kube-api-access-mxjms". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.612488 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mxjms\" (UniqueName: \"kubernetes.io/projected/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-kube-api-access-mxjms\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.648613 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" (UID: "ec99aab2-ff81-4a20-ba8d-0a0e80f119b0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.715351 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.808932 4631 generic.go:334] "Generic (PLEG): container finished" podID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerID="5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5" exitCode=0 Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.809013 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v4mmd" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.809042 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4mmd" event={"ID":"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0","Type":"ContainerDied","Data":"5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5"} Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.809692 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4mmd" event={"ID":"ec99aab2-ff81-4a20-ba8d-0a0e80f119b0","Type":"ContainerDied","Data":"8c231bd6158f0300ec711def7c9412678ffcd30e6c01d96d3ad164358d914078"} Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.809717 4631 scope.go:117] "RemoveContainer" containerID="5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.841848 4631 scope.go:117] "RemoveContainer" containerID="e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.871490 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v4mmd"] Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.873390 4631 scope.go:117] "RemoveContainer" containerID="88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.883731 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v4mmd"] Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.927518 4631 scope.go:117] "RemoveContainer" containerID="5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5" Nov 29 05:05:08 crc kubenswrapper[4631]: E1129 05:05:08.928760 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5\": container with ID starting with 5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5 not found: ID does not exist" containerID="5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.928805 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5"} err="failed to get container status \"5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5\": rpc error: code = NotFound desc = could not find container \"5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5\": container with ID starting with 5874b618f50fe94d50c2ed70d82e8b4485c29bba863f9308ee0cdd8d0e7facd5 not found: ID does not exist" Nov 29 05:05:08 crc 
kubenswrapper[4631]: I1129 05:05:08.928832 4631 scope.go:117] "RemoveContainer" containerID="e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483" Nov 29 05:05:08 crc kubenswrapper[4631]: E1129 05:05:08.929200 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483\": container with ID starting with e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483 not found: ID does not exist" containerID="e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.929241 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483"} err="failed to get container status \"e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483\": rpc error: code = NotFound desc = could not find container \"e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483\": container with ID starting with e93dff575479d35b7eaba7de67f7441133edea82ef33695cc8c9a01535edd483 not found: ID does not exist" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.929269 4631 scope.go:117] "RemoveContainer" containerID="88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9" Nov 29 05:05:08 crc kubenswrapper[4631]: E1129 05:05:08.929635 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9\": container with ID starting with 88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9 not found: ID does not exist" containerID="88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9" Nov 29 05:05:08 crc kubenswrapper[4631]: I1129 05:05:08.929658 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9"} err="failed to get container status \"88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9\": rpc error: code = NotFound desc = could not find container \"88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9\": container with ID starting with 88942af0dc4fc54bd3811edecd35d491c054c85f4e6ecb137224ec39a25c3bb9 not found: ID does not exist" Nov 29 05:05:09 crc kubenswrapper[4631]: I1129 05:05:09.234758 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" path="/var/lib/kubelet/pods/ec99aab2-ff81-4a20-ba8d-0a0e80f119b0/volumes" Nov 29 05:05:09 crc kubenswrapper[4631]: I1129 05:05:09.818104 4631 generic.go:334] "Generic (PLEG): container finished" podID="cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" containerID="6713e9d977951333aebf32a457c8effa3be72d0ab1ddb06349e89a2ed0b438d6" exitCode=0 Nov 29 05:05:09 crc kubenswrapper[4631]: I1129 05:05:09.818188 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb","Type":"ContainerDied","Data":"6713e9d977951333aebf32a457c8effa3be72d0ab1ddb06349e89a2ed0b438d6"} Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.216770 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:05:11 crc kubenswrapper[4631]: E1129 05:05:11.217414 4631 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.325251 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.370361 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.370675 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-config-data\") pod \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.370792 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zb8gh\" (UniqueName: \"kubernetes.io/projected/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-kube-api-access-zb8gh\") pod \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.370837 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config\") pod \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.371483 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-workdir\") pod \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.371532 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config-secret\") pod \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.371559 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ssh-key\") pod \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.371574 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ca-certs\") pod \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.371622 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-temporary\") pod \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\" (UID: \"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb\") " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.371809 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-config-data" (OuterVolumeSpecName: "config-data") pod "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" (UID: "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.372468 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" (UID: "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.372676 4631 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.372702 4631 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-config-data\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.375873 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "test-operator-logs") pod "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" (UID: "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.383567 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-kube-api-access-zb8gh" (OuterVolumeSpecName: "kube-api-access-zb8gh") pod "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" (UID: "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"). InnerVolumeSpecName "kube-api-access-zb8gh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.383742 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" (UID: "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.397816 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" (UID: "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.407563 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" (UID: "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.439479 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" (UID: "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.450671 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" (UID: "cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.473478 4631 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.473509 4631 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.473522 4631 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.473531 4631 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.473555 4631 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.473566 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zb8gh\" (UniqueName: \"kubernetes.io/projected/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-kube-api-access-zb8gh\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.473575 4631 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.491988 4631 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 
05:05:11.576028 4631 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.853313 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb","Type":"ContainerDied","Data":"713585da4d2ee4e19360ac3d46e80374de27f303939c35da7ffce41730df8241"} Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.853450 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="713585da4d2ee4e19360ac3d46e80374de27f303939c35da7ffce41730df8241" Nov 29 05:05:11 crc kubenswrapper[4631]: I1129 05:05:11.853449 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.132412 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 29 05:05:19 crc kubenswrapper[4631]: E1129 05:05:19.133742 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerName="registry-server" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.133769 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerName="registry-server" Nov 29 05:05:19 crc kubenswrapper[4631]: E1129 05:05:19.133816 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerName="extract-utilities" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.133830 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerName="extract-utilities" Nov 29 05:05:19 crc kubenswrapper[4631]: E1129 05:05:19.133863 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerName="extract-content" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.133875 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerName="extract-content" Nov 29 05:05:19 crc kubenswrapper[4631]: E1129 05:05:19.133899 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" containerName="tempest-tests-tempest-tests-runner" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.133912 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" containerName="tempest-tests-tempest-tests-runner" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.134303 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec99aab2-ff81-4a20-ba8d-0a0e80f119b0" containerName="registry-server" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.134395 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb" containerName="tempest-tests-tempest-tests-runner" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.135469 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.142966 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5sdwg" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.180189 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.239399 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcvpj\" (UniqueName: \"kubernetes.io/projected/446545e5-fff2-4f7e-960c-26bf33c121bc-kube-api-access-gcvpj\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"446545e5-fff2-4f7e-960c-26bf33c121bc\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.239859 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"446545e5-fff2-4f7e-960c-26bf33c121bc\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.342790 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcvpj\" (UniqueName: \"kubernetes.io/projected/446545e5-fff2-4f7e-960c-26bf33c121bc-kube-api-access-gcvpj\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"446545e5-fff2-4f7e-960c-26bf33c121bc\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.343098 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"446545e5-fff2-4f7e-960c-26bf33c121bc\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.343569 4631 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"446545e5-fff2-4f7e-960c-26bf33c121bc\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.366445 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcvpj\" (UniqueName: \"kubernetes.io/projected/446545e5-fff2-4f7e-960c-26bf33c121bc-kube-api-access-gcvpj\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"446545e5-fff2-4f7e-960c-26bf33c121bc\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.387743 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"446545e5-fff2-4f7e-960c-26bf33c121bc\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 05:05:19 crc 
kubenswrapper[4631]: I1129 05:05:19.472494 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 29 05:05:19 crc kubenswrapper[4631]: I1129 05:05:19.990207 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 29 05:05:20 crc kubenswrapper[4631]: I1129 05:05:20.959975 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"446545e5-fff2-4f7e-960c-26bf33c121bc","Type":"ContainerStarted","Data":"f7d3a3334939cc7bcc4b3e095b9aff8fda44c23088c750a15296f6c4d39220eb"} Nov 29 05:05:21 crc kubenswrapper[4631]: I1129 05:05:21.973226 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"446545e5-fff2-4f7e-960c-26bf33c121bc","Type":"ContainerStarted","Data":"78c6d0f776702ab97f1cfd94974136756c20b31e523047284ee0558a20a51be9"} Nov 29 05:05:21 crc kubenswrapper[4631]: I1129 05:05:21.995768 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.089774951 podStartE2EDuration="2.995752514s" podCreationTimestamp="2025-11-29 05:05:19 +0000 UTC" firstStartedPulling="2025-11-29 05:05:19.998852106 +0000 UTC m=+3247.063355630" lastFinishedPulling="2025-11-29 05:05:20.904829679 +0000 UTC m=+3247.969333193" observedRunningTime="2025-11-29 05:05:21.988481355 +0000 UTC m=+3249.052984869" watchObservedRunningTime="2025-11-29 05:05:21.995752514 +0000 UTC m=+3249.060256028" Nov 29 05:05:23 crc kubenswrapper[4631]: I1129 05:05:23.236805 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:05:23 crc kubenswrapper[4631]: E1129 05:05:23.246284 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:05:34 crc kubenswrapper[4631]: I1129 05:05:34.217394 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:05:34 crc kubenswrapper[4631]: E1129 05:05:34.218195 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.616736 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5sd8j/must-gather-8nkth"] Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.619277 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5sd8j/must-gather-8nkth"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.621013 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-5sd8j"/"default-dockercfg-tsqml"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.622430 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-5sd8j"/"kube-root-ca.crt"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.623520 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-5sd8j"/"openshift-service-ca.crt"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.655045 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-5sd8j/must-gather-8nkth"]
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.745556 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlsbp\" (UniqueName: \"kubernetes.io/projected/a441aed9-70ab-4efd-b036-0f0af5515f84-kube-api-access-nlsbp\") pod \"must-gather-8nkth\" (UID: \"a441aed9-70ab-4efd-b036-0f0af5515f84\") " pod="openshift-must-gather-5sd8j/must-gather-8nkth"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.745748 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a441aed9-70ab-4efd-b036-0f0af5515f84-must-gather-output\") pod \"must-gather-8nkth\" (UID: \"a441aed9-70ab-4efd-b036-0f0af5515f84\") " pod="openshift-must-gather-5sd8j/must-gather-8nkth"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.847828 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a441aed9-70ab-4efd-b036-0f0af5515f84-must-gather-output\") pod \"must-gather-8nkth\" (UID: \"a441aed9-70ab-4efd-b036-0f0af5515f84\") " pod="openshift-must-gather-5sd8j/must-gather-8nkth"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.848377 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlsbp\" (UniqueName: \"kubernetes.io/projected/a441aed9-70ab-4efd-b036-0f0af5515f84-kube-api-access-nlsbp\") pod \"must-gather-8nkth\" (UID: \"a441aed9-70ab-4efd-b036-0f0af5515f84\") " pod="openshift-must-gather-5sd8j/must-gather-8nkth"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.848388 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a441aed9-70ab-4efd-b036-0f0af5515f84-must-gather-output\") pod \"must-gather-8nkth\" (UID: \"a441aed9-70ab-4efd-b036-0f0af5515f84\") " pod="openshift-must-gather-5sd8j/must-gather-8nkth"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.865315 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlsbp\" (UniqueName: \"kubernetes.io/projected/a441aed9-70ab-4efd-b036-0f0af5515f84-kube-api-access-nlsbp\") pod \"must-gather-8nkth\" (UID: \"a441aed9-70ab-4efd-b036-0f0af5515f84\") " pod="openshift-must-gather-5sd8j/must-gather-8nkth"
Nov 29 05:05:43 crc kubenswrapper[4631]: I1129 05:05:43.936898 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/must-gather-8nkth"
Nov 29 05:05:44 crc kubenswrapper[4631]: I1129 05:05:44.379302 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-5sd8j/must-gather-8nkth"]
Nov 29 05:05:45 crc kubenswrapper[4631]: I1129 05:05:45.239900 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/must-gather-8nkth" event={"ID":"a441aed9-70ab-4efd-b036-0f0af5515f84","Type":"ContainerStarted","Data":"d99781dbfcc5bd538c77f9dee32bcbc3e01ae1ffa4801073770e2dd21a4ba8a7"}
Nov 29 05:05:48 crc kubenswrapper[4631]: I1129 05:05:48.881476 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rr7dz"]
Nov 29 05:05:48 crc kubenswrapper[4631]: I1129 05:05:48.883630 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:48 crc kubenswrapper[4631]: I1129 05:05:48.895405 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rr7dz"]
Nov 29 05:05:48 crc kubenswrapper[4631]: I1129 05:05:48.976386 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-utilities\") pod \"community-operators-rr7dz\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") " pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:48 crc kubenswrapper[4631]: I1129 05:05:48.976727 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-catalog-content\") pod \"community-operators-rr7dz\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") " pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:48 crc kubenswrapper[4631]: I1129 05:05:48.976899 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vrbv\" (UniqueName: \"kubernetes.io/projected/68a32e0c-4209-4aff-9524-d935d9408cea-kube-api-access-8vrbv\") pod \"community-operators-rr7dz\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") " pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.078676 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-catalog-content\") pod \"community-operators-rr7dz\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") " pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.079076 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vrbv\" (UniqueName: \"kubernetes.io/projected/68a32e0c-4209-4aff-9524-d935d9408cea-kube-api-access-8vrbv\") pod \"community-operators-rr7dz\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") " pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.079114 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-utilities\") pod \"community-operators-rr7dz\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") " pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.079172 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-catalog-content\") pod \"community-operators-rr7dz\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") " pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.079543 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-utilities\") pod \"community-operators-rr7dz\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") " pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.100508 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vrbv\" (UniqueName: \"kubernetes.io/projected/68a32e0c-4209-4aff-9524-d935d9408cea-kube-api-access-8vrbv\") pod \"community-operators-rr7dz\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") " pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.203885 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.220455 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:05:49 crc kubenswrapper[4631]: E1129 05:05:49.220695 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.300236 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/must-gather-8nkth" event={"ID":"a441aed9-70ab-4efd-b036-0f0af5515f84","Type":"ContainerStarted","Data":"dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9"}
Nov 29 05:05:49 crc kubenswrapper[4631]: I1129 05:05:49.920680 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rr7dz"]
Nov 29 05:05:50 crc kubenswrapper[4631]: I1129 05:05:50.313067 4631 generic.go:334] "Generic (PLEG): container finished" podID="68a32e0c-4209-4aff-9524-d935d9408cea" containerID="bc20007e8e317751373eee01f244551f9212920ee7bf93fbdfab218d3040c7b4" exitCode=0
Nov 29 05:05:50 crc kubenswrapper[4631]: I1129 05:05:50.313425 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rr7dz" event={"ID":"68a32e0c-4209-4aff-9524-d935d9408cea","Type":"ContainerDied","Data":"bc20007e8e317751373eee01f244551f9212920ee7bf93fbdfab218d3040c7b4"}
Nov 29 05:05:50 crc kubenswrapper[4631]: I1129 05:05:50.313454 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rr7dz" event={"ID":"68a32e0c-4209-4aff-9524-d935d9408cea","Type":"ContainerStarted","Data":"873d22805d21ed341d55df9fe29bf9f712e404e0b53d162ded89d28070f3e9cb"}
Nov 29 05:05:50 crc kubenswrapper[4631]: I1129 05:05:50.319028 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/must-gather-8nkth" event={"ID":"a441aed9-70ab-4efd-b036-0f0af5515f84","Type":"ContainerStarted","Data":"9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee"}
Nov 29 05:05:50 crc kubenswrapper[4631]: I1129 05:05:50.348864 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-5sd8j/must-gather-8nkth" podStartSLOduration=2.887103764 podStartE2EDuration="7.348844978s" podCreationTimestamp="2025-11-29 05:05:43 +0000 UTC" firstStartedPulling="2025-11-29 05:05:44.385128649 +0000 UTC m=+3271.449632163" lastFinishedPulling="2025-11-29 05:05:48.846869853 +0000 UTC m=+3275.911373377" observedRunningTime="2025-11-29 05:05:50.343843605 +0000 UTC m=+3277.408347119" watchObservedRunningTime="2025-11-29 05:05:50.348844978 +0000 UTC m=+3277.413348492"
Nov 29 05:05:51 crc kubenswrapper[4631]: I1129 05:05:51.331961 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rr7dz" event={"ID":"68a32e0c-4209-4aff-9524-d935d9408cea","Type":"ContainerStarted","Data":"ac124d2dbe38e36132261589c0216ad77326fda4101efe17280569e16a122b40"}
Nov 29 05:05:52 crc kubenswrapper[4631]: I1129 05:05:52.341772 4631 generic.go:334] "Generic (PLEG): container finished" podID="68a32e0c-4209-4aff-9524-d935d9408cea" containerID="ac124d2dbe38e36132261589c0216ad77326fda4101efe17280569e16a122b40" exitCode=0
Nov 29 05:05:52 crc kubenswrapper[4631]: I1129 05:05:52.341933 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rr7dz" event={"ID":"68a32e0c-4209-4aff-9524-d935d9408cea","Type":"ContainerDied","Data":"ac124d2dbe38e36132261589c0216ad77326fda4101efe17280569e16a122b40"}
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.353004 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rr7dz" event={"ID":"68a32e0c-4209-4aff-9524-d935d9408cea","Type":"ContainerStarted","Data":"d4df83655014ab1e904cfa2dedcb23b42794a09af9c92143bc4610fd1232b89a"}
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.391018 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rr7dz" podStartSLOduration=2.85102591 podStartE2EDuration="5.390989137s" podCreationTimestamp="2025-11-29 05:05:48 +0000 UTC" firstStartedPulling="2025-11-29 05:05:50.31642286 +0000 UTC m=+3277.380926364" lastFinishedPulling="2025-11-29 05:05:52.856386077 +0000 UTC m=+3279.920889591" observedRunningTime="2025-11-29 05:05:53.374322717 +0000 UTC m=+3280.438826241" watchObservedRunningTime="2025-11-29 05:05:53.390989137 +0000 UTC m=+3280.455492661"
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.688096 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5sd8j/crc-debug-psh9z"]
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.689238 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.768457 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znjn6\" (UniqueName: \"kubernetes.io/projected/42f2aa61-e097-4301-b7a1-b773528c51c5-kube-api-access-znjn6\") pod \"crc-debug-psh9z\" (UID: \"42f2aa61-e097-4301-b7a1-b773528c51c5\") " pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.768509 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/42f2aa61-e097-4301-b7a1-b773528c51c5-host\") pod \"crc-debug-psh9z\" (UID: \"42f2aa61-e097-4301-b7a1-b773528c51c5\") " pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.870401 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znjn6\" (UniqueName: \"kubernetes.io/projected/42f2aa61-e097-4301-b7a1-b773528c51c5-kube-api-access-znjn6\") pod \"crc-debug-psh9z\" (UID: \"42f2aa61-e097-4301-b7a1-b773528c51c5\") " pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.870455 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/42f2aa61-e097-4301-b7a1-b773528c51c5-host\") pod \"crc-debug-psh9z\" (UID: \"42f2aa61-e097-4301-b7a1-b773528c51c5\") " pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.870723 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/42f2aa61-e097-4301-b7a1-b773528c51c5-host\") pod \"crc-debug-psh9z\" (UID: \"42f2aa61-e097-4301-b7a1-b773528c51c5\") " pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:05:53 crc kubenswrapper[4631]: I1129 05:05:53.902376 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znjn6\" (UniqueName: \"kubernetes.io/projected/42f2aa61-e097-4301-b7a1-b773528c51c5-kube-api-access-znjn6\") pod \"crc-debug-psh9z\" (UID: \"42f2aa61-e097-4301-b7a1-b773528c51c5\") " pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:05:54 crc kubenswrapper[4631]: I1129 05:05:54.003668 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:05:54 crc kubenswrapper[4631]: I1129 05:05:54.369139 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/crc-debug-psh9z" event={"ID":"42f2aa61-e097-4301-b7a1-b773528c51c5","Type":"ContainerStarted","Data":"f0826e0f1f7f89df359440cbe86105b88c3df8d02c3a3f4e2d376334c4d394ba"}
Nov 29 05:05:59 crc kubenswrapper[4631]: I1129 05:05:59.204259 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:59 crc kubenswrapper[4631]: I1129 05:05:59.204807 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:59 crc kubenswrapper[4631]: I1129 05:05:59.277977 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:59 crc kubenswrapper[4631]: I1129 05:05:59.463197 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:05:59 crc kubenswrapper[4631]: I1129 05:05:59.517073 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rr7dz"]
Nov 29 05:06:01 crc kubenswrapper[4631]: I1129 05:06:01.216521 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:06:01 crc kubenswrapper[4631]: E1129 05:06:01.217133 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:06:01 crc kubenswrapper[4631]: I1129 05:06:01.434542 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rr7dz" podUID="68a32e0c-4209-4aff-9524-d935d9408cea" containerName="registry-server" containerID="cri-o://d4df83655014ab1e904cfa2dedcb23b42794a09af9c92143bc4610fd1232b89a" gracePeriod=2
Nov 29 05:06:02 crc kubenswrapper[4631]: I1129 05:06:02.465605 4631 generic.go:334] "Generic (PLEG): container finished" podID="68a32e0c-4209-4aff-9524-d935d9408cea" containerID="d4df83655014ab1e904cfa2dedcb23b42794a09af9c92143bc4610fd1232b89a" exitCode=0
Nov 29 05:06:02 crc kubenswrapper[4631]: I1129 05:06:02.465991 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rr7dz" event={"ID":"68a32e0c-4209-4aff-9524-d935d9408cea","Type":"ContainerDied","Data":"d4df83655014ab1e904cfa2dedcb23b42794a09af9c92143bc4610fd1232b89a"}
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.132752 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.268502 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vrbv\" (UniqueName: \"kubernetes.io/projected/68a32e0c-4209-4aff-9524-d935d9408cea-kube-api-access-8vrbv\") pod \"68a32e0c-4209-4aff-9524-d935d9408cea\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") "
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.269779 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-catalog-content\") pod \"68a32e0c-4209-4aff-9524-d935d9408cea\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") "
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.269917 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-utilities\") pod \"68a32e0c-4209-4aff-9524-d935d9408cea\" (UID: \"68a32e0c-4209-4aff-9524-d935d9408cea\") "
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.271858 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-utilities" (OuterVolumeSpecName: "utilities") pod "68a32e0c-4209-4aff-9524-d935d9408cea" (UID: "68a32e0c-4209-4aff-9524-d935d9408cea"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.277458 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68a32e0c-4209-4aff-9524-d935d9408cea-kube-api-access-8vrbv" (OuterVolumeSpecName: "kube-api-access-8vrbv") pod "68a32e0c-4209-4aff-9524-d935d9408cea" (UID: "68a32e0c-4209-4aff-9524-d935d9408cea"). InnerVolumeSpecName "kube-api-access-8vrbv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.317199 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "68a32e0c-4209-4aff-9524-d935d9408cea" (UID: "68a32e0c-4209-4aff-9524-d935d9408cea"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.372431 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vrbv\" (UniqueName: \"kubernetes.io/projected/68a32e0c-4209-4aff-9524-d935d9408cea-kube-api-access-8vrbv\") on node \"crc\" DevicePath \"\""
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.372480 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.372492 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68a32e0c-4209-4aff-9524-d935d9408cea-utilities\") on node \"crc\" DevicePath \"\""
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.516236 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rr7dz" event={"ID":"68a32e0c-4209-4aff-9524-d935d9408cea","Type":"ContainerDied","Data":"873d22805d21ed341d55df9fe29bf9f712e404e0b53d162ded89d28070f3e9cb"}
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.516530 4631 scope.go:117] "RemoveContainer" containerID="d4df83655014ab1e904cfa2dedcb23b42794a09af9c92143bc4610fd1232b89a"
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.516266 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rr7dz"
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.517878 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/crc-debug-psh9z" event={"ID":"42f2aa61-e097-4301-b7a1-b773528c51c5","Type":"ContainerStarted","Data":"46bd553c09e90cbb74c5a683b895e8294e6b667a3422aacfacb3058f19b5161a"}
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.549387 4631 scope.go:117] "RemoveContainer" containerID="ac124d2dbe38e36132261589c0216ad77326fda4101efe17280569e16a122b40"
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.573449 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-5sd8j/crc-debug-psh9z" podStartSLOduration=1.788809219 podStartE2EDuration="15.573428708s" podCreationTimestamp="2025-11-29 05:05:53 +0000 UTC" firstStartedPulling="2025-11-29 05:05:54.047302225 +0000 UTC m=+3281.111805739" lastFinishedPulling="2025-11-29 05:06:07.831921714 +0000 UTC m=+3294.896425228" observedRunningTime="2025-11-29 05:06:08.54545588 +0000 UTC m=+3295.609959394" watchObservedRunningTime="2025-11-29 05:06:08.573428708 +0000 UTC m=+3295.637932222"
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.575906 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rr7dz"]
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.584667 4631 scope.go:117] "RemoveContainer" containerID="bc20007e8e317751373eee01f244551f9212920ee7bf93fbdfab218d3040c7b4"
Nov 29 05:06:08 crc kubenswrapper[4631]: I1129 05:06:08.585116 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rr7dz"]
Nov 29 05:06:09 crc kubenswrapper[4631]: I1129 05:06:09.280408 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68a32e0c-4209-4aff-9524-d935d9408cea" path="/var/lib/kubelet/pods/68a32e0c-4209-4aff-9524-d935d9408cea/volumes"
Nov 29 05:06:13 crc kubenswrapper[4631]: I1129 05:06:13.224709 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:06:13 crc kubenswrapper[4631]: E1129 05:06:13.225274 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:06:24 crc kubenswrapper[4631]: I1129 05:06:24.216726 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:06:24 crc kubenswrapper[4631]: E1129 05:06:24.218233 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:06:39 crc kubenswrapper[4631]: I1129 05:06:39.219209 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:06:39 crc kubenswrapper[4631]: E1129 05:06:39.221484 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:06:46 crc kubenswrapper[4631]: I1129 05:06:46.851175 4631 generic.go:334] "Generic (PLEG): container finished" podID="42f2aa61-e097-4301-b7a1-b773528c51c5" containerID="46bd553c09e90cbb74c5a683b895e8294e6b667a3422aacfacb3058f19b5161a" exitCode=0
Nov 29 05:06:46 crc kubenswrapper[4631]: I1129 05:06:46.851254 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/crc-debug-psh9z" event={"ID":"42f2aa61-e097-4301-b7a1-b773528c51c5","Type":"ContainerDied","Data":"46bd553c09e90cbb74c5a683b895e8294e6b667a3422aacfacb3058f19b5161a"}
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.045192 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.063627 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/42f2aa61-e097-4301-b7a1-b773528c51c5-host\") pod \"42f2aa61-e097-4301-b7a1-b773528c51c5\" (UID: \"42f2aa61-e097-4301-b7a1-b773528c51c5\") "
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.063979 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znjn6\" (UniqueName: \"kubernetes.io/projected/42f2aa61-e097-4301-b7a1-b773528c51c5-kube-api-access-znjn6\") pod \"42f2aa61-e097-4301-b7a1-b773528c51c5\" (UID: \"42f2aa61-e097-4301-b7a1-b773528c51c5\") "
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.064371 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42f2aa61-e097-4301-b7a1-b773528c51c5-host" (OuterVolumeSpecName: "host") pod "42f2aa61-e097-4301-b7a1-b773528c51c5" (UID: "42f2aa61-e097-4301-b7a1-b773528c51c5"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.067589 4631 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/42f2aa61-e097-4301-b7a1-b773528c51c5-host\") on node \"crc\" DevicePath \"\""
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.071243 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42f2aa61-e097-4301-b7a1-b773528c51c5-kube-api-access-znjn6" (OuterVolumeSpecName: "kube-api-access-znjn6") pod "42f2aa61-e097-4301-b7a1-b773528c51c5" (UID: "42f2aa61-e097-4301-b7a1-b773528c51c5"). InnerVolumeSpecName "kube-api-access-znjn6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.091052 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5sd8j/crc-debug-psh9z"]
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.105473 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5sd8j/crc-debug-psh9z"]
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.169545 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znjn6\" (UniqueName: \"kubernetes.io/projected/42f2aa61-e097-4301-b7a1-b773528c51c5-kube-api-access-znjn6\") on node \"crc\" DevicePath \"\""
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.923224 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0826e0f1f7f89df359440cbe86105b88c3df8d02c3a3f4e2d376334c4d394ba"
Nov 29 05:06:48 crc kubenswrapper[4631]: I1129 05:06:48.923322 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-psh9z"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.227591 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42f2aa61-e097-4301-b7a1-b773528c51c5" path="/var/lib/kubelet/pods/42f2aa61-e097-4301-b7a1-b773528c51c5/volumes"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.292263 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5sd8j/crc-debug-hbz6r"]
Nov 29 05:06:49 crc kubenswrapper[4631]: E1129 05:06:49.292698 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42f2aa61-e097-4301-b7a1-b773528c51c5" containerName="container-00"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.292720 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="42f2aa61-e097-4301-b7a1-b773528c51c5" containerName="container-00"
Nov 29 05:06:49 crc kubenswrapper[4631]: E1129 05:06:49.292752 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68a32e0c-4209-4aff-9524-d935d9408cea" containerName="registry-server"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.292760 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="68a32e0c-4209-4aff-9524-d935d9408cea" containerName="registry-server"
Nov 29 05:06:49 crc kubenswrapper[4631]: E1129 05:06:49.292787 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68a32e0c-4209-4aff-9524-d935d9408cea" containerName="extract-utilities"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.292795 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="68a32e0c-4209-4aff-9524-d935d9408cea" containerName="extract-utilities"
Nov 29 05:06:49 crc kubenswrapper[4631]: E1129 05:06:49.292816 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68a32e0c-4209-4aff-9524-d935d9408cea" containerName="extract-content"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.292824 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="68a32e0c-4209-4aff-9524-d935d9408cea" containerName="extract-content"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.293031 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="68a32e0c-4209-4aff-9524-d935d9408cea" containerName="registry-server"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.293063 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="42f2aa61-e097-4301-b7a1-b773528c51c5" containerName="container-00"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.293795 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.390812 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/da7b3c16-af51-4bd1-add5-aeb64607c759-host\") pod \"crc-debug-hbz6r\" (UID: \"da7b3c16-af51-4bd1-add5-aeb64607c759\") " pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.390947 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmz28\" (UniqueName: \"kubernetes.io/projected/da7b3c16-af51-4bd1-add5-aeb64607c759-kube-api-access-pmz28\") pod \"crc-debug-hbz6r\" (UID: \"da7b3c16-af51-4bd1-add5-aeb64607c759\") " pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.493228 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmz28\" (UniqueName: \"kubernetes.io/projected/da7b3c16-af51-4bd1-add5-aeb64607c759-kube-api-access-pmz28\") pod \"crc-debug-hbz6r\" (UID: \"da7b3c16-af51-4bd1-add5-aeb64607c759\") " pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.493447 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/da7b3c16-af51-4bd1-add5-aeb64607c759-host\") pod \"crc-debug-hbz6r\" (UID: \"da7b3c16-af51-4bd1-add5-aeb64607c759\") " pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.493666 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/da7b3c16-af51-4bd1-add5-aeb64607c759-host\") pod \"crc-debug-hbz6r\" (UID: \"da7b3c16-af51-4bd1-add5-aeb64607c759\") " pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.510718 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmz28\" (UniqueName: \"kubernetes.io/projected/da7b3c16-af51-4bd1-add5-aeb64607c759-kube-api-access-pmz28\") pod \"crc-debug-hbz6r\" (UID: \"da7b3c16-af51-4bd1-add5-aeb64607c759\") " pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.608185 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.931954 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/crc-debug-hbz6r" event={"ID":"da7b3c16-af51-4bd1-add5-aeb64607c759","Type":"ContainerStarted","Data":"2971ce6b5ba7a22c8b01b61e19ec83ce3a820125f171fb4d5700e6386751ea37"}
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.932350 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/crc-debug-hbz6r" event={"ID":"da7b3c16-af51-4bd1-add5-aeb64607c759","Type":"ContainerStarted","Data":"8cc862d8b3bd88dd826b1ec823c2da20d408cc88fe1984df56bb711c81707df6"}
Nov 29 05:06:49 crc kubenswrapper[4631]: I1129 05:06:49.946927 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-5sd8j/crc-debug-hbz6r" podStartSLOduration=0.94690657 podStartE2EDuration="946.90657ms" podCreationTimestamp="2025-11-29 05:06:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 05:06:49.945212498 +0000 UTC m=+3337.009716022" watchObservedRunningTime="2025-11-29 05:06:49.94690657 +0000 UTC m=+3337.011410084"
Nov 29 05:06:50 crc kubenswrapper[4631]: I1129 05:06:50.216023 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:06:50 crc kubenswrapper[4631]: E1129 05:06:50.216687 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:06:50 crc kubenswrapper[4631]: I1129 05:06:50.946872 4631 generic.go:334] "Generic (PLEG): container finished" podID="da7b3c16-af51-4bd1-add5-aeb64607c759" containerID="2971ce6b5ba7a22c8b01b61e19ec83ce3a820125f171fb4d5700e6386751ea37" exitCode=0
Nov 29 05:06:50 crc kubenswrapper[4631]: I1129 05:06:50.946935 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/crc-debug-hbz6r" event={"ID":"da7b3c16-af51-4bd1-add5-aeb64607c759","Type":"ContainerDied","Data":"2971ce6b5ba7a22c8b01b61e19ec83ce3a820125f171fb4d5700e6386751ea37"}
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.103281 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.135099 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/da7b3c16-af51-4bd1-add5-aeb64607c759-host\") pod \"da7b3c16-af51-4bd1-add5-aeb64607c759\" (UID: \"da7b3c16-af51-4bd1-add5-aeb64607c759\") "
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.135464 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmz28\" (UniqueName: \"kubernetes.io/projected/da7b3c16-af51-4bd1-add5-aeb64607c759-kube-api-access-pmz28\") pod \"da7b3c16-af51-4bd1-add5-aeb64607c759\" (UID: \"da7b3c16-af51-4bd1-add5-aeb64607c759\") "
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.135465 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/da7b3c16-af51-4bd1-add5-aeb64607c759-host" (OuterVolumeSpecName: "host") pod "da7b3c16-af51-4bd1-add5-aeb64607c759" (UID: "da7b3c16-af51-4bd1-add5-aeb64607c759"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.135950 4631 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/da7b3c16-af51-4bd1-add5-aeb64607c759-host\") on node \"crc\" DevicePath \"\""
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.147566 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da7b3c16-af51-4bd1-add5-aeb64607c759-kube-api-access-pmz28" (OuterVolumeSpecName: "kube-api-access-pmz28") pod "da7b3c16-af51-4bd1-add5-aeb64607c759" (UID: "da7b3c16-af51-4bd1-add5-aeb64607c759"). InnerVolumeSpecName "kube-api-access-pmz28". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.148898 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5sd8j/crc-debug-hbz6r"]
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.157741 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5sd8j/crc-debug-hbz6r"]
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.252747 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmz28\" (UniqueName: \"kubernetes.io/projected/da7b3c16-af51-4bd1-add5-aeb64607c759-kube-api-access-pmz28\") on node \"crc\" DevicePath \"\""
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.978760 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8cc862d8b3bd88dd826b1ec823c2da20d408cc88fe1984df56bb711c81707df6"
Nov 29 05:06:52 crc kubenswrapper[4631]: I1129 05:06:52.979095 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-hbz6r"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.232550 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da7b3c16-af51-4bd1-add5-aeb64607c759" path="/var/lib/kubelet/pods/da7b3c16-af51-4bd1-add5-aeb64607c759/volumes"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.385701 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5sd8j/crc-debug-lcssg"]
Nov 29 05:06:53 crc kubenswrapper[4631]: E1129 05:06:53.386182 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da7b3c16-af51-4bd1-add5-aeb64607c759" containerName="container-00"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.386205 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="da7b3c16-af51-4bd1-add5-aeb64607c759" containerName="container-00"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.386435 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="da7b3c16-af51-4bd1-add5-aeb64607c759" containerName="container-00"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.387275 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.580048 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-host\") pod \"crc-debug-lcssg\" (UID: \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\") " pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.580836 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzdpg\" (UniqueName: \"kubernetes.io/projected/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-kube-api-access-nzdpg\") pod \"crc-debug-lcssg\" (UID: \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\") " pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.683191 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-host\") pod \"crc-debug-lcssg\" (UID: \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\") " pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.683474 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-host\") pod \"crc-debug-lcssg\" (UID: \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\") " pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.683490 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzdpg\" (UniqueName: \"kubernetes.io/projected/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-kube-api-access-nzdpg\") pod \"crc-debug-lcssg\" (UID: \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\") " pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.708724 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzdpg\" (UniqueName: \"kubernetes.io/projected/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-kube-api-access-nzdpg\") pod \"crc-debug-lcssg\" (UID: \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\") " pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.714402 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:53 crc kubenswrapper[4631]: W1129 05:06:53.761979 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddcdedb20_6a4a_483d_a970_3c6b4bb45ee1.slice/crio-dbe04a5b1af5e35f42ca6747fa66d546a61524a8f27c54b8528ca85641f7ac48 WatchSource:0}: Error finding container dbe04a5b1af5e35f42ca6747fa66d546a61524a8f27c54b8528ca85641f7ac48: Status 404 returned error can't find the container with id dbe04a5b1af5e35f42ca6747fa66d546a61524a8f27c54b8528ca85641f7ac48
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.996925 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/crc-debug-lcssg" event={"ID":"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1","Type":"ContainerStarted","Data":"695b71170e7f143208a88fe3b4ca504ec4984f3a8192e5c3a80d720ff1c516f2"}
Nov 29 05:06:53 crc kubenswrapper[4631]: I1129 05:06:53.997179 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/crc-debug-lcssg" event={"ID":"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1","Type":"ContainerStarted","Data":"dbe04a5b1af5e35f42ca6747fa66d546a61524a8f27c54b8528ca85641f7ac48"}
Nov 29 05:06:54 crc kubenswrapper[4631]: I1129 05:06:54.015215 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-5sd8j/crc-debug-lcssg" podStartSLOduration=1.015198482 podStartE2EDuration="1.015198482s" podCreationTimestamp="2025-11-29 05:06:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 05:06:54.013883759 +0000 UTC m=+3341.078387293" watchObservedRunningTime="2025-11-29 05:06:54.015198482 +0000 UTC m=+3341.079701996"
Nov 29 05:06:55 crc kubenswrapper[4631]: I1129 05:06:55.009301 4631 generic.go:334] "Generic (PLEG): container finished" podID="dcdedb20-6a4a-483d-a970-3c6b4bb45ee1" containerID="695b71170e7f143208a88fe3b4ca504ec4984f3a8192e5c3a80d720ff1c516f2" exitCode=0
Nov 29 05:06:55 crc kubenswrapper[4631]: I1129 05:06:55.009411 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/crc-debug-lcssg" event={"ID":"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1","Type":"ContainerDied","Data":"695b71170e7f143208a88fe3b4ca504ec4984f3a8192e5c3a80d720ff1c516f2"}
Nov 29 05:06:56 crc kubenswrapper[4631]: I1129 05:06:56.122058 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:56 crc kubenswrapper[4631]: I1129 05:06:56.158141 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5sd8j/crc-debug-lcssg"]
Nov 29 05:06:56 crc kubenswrapper[4631]: I1129 05:06:56.165595 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5sd8j/crc-debug-lcssg"]
Nov 29 05:06:56 crc kubenswrapper[4631]: I1129 05:06:56.244130 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-host\") pod \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\" (UID: \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\") "
Nov 29 05:06:56 crc kubenswrapper[4631]: I1129 05:06:56.244705 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-host" (OuterVolumeSpecName: "host") pod "dcdedb20-6a4a-483d-a970-3c6b4bb45ee1" (UID: "dcdedb20-6a4a-483d-a970-3c6b4bb45ee1"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 29 05:06:56 crc kubenswrapper[4631]: I1129 05:06:56.247824 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzdpg\" (UniqueName: \"kubernetes.io/projected/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-kube-api-access-nzdpg\") pod \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\" (UID: \"dcdedb20-6a4a-483d-a970-3c6b4bb45ee1\") "
Nov 29 05:06:56 crc kubenswrapper[4631]: I1129 05:06:56.251081 4631 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-host\") on node \"crc\" DevicePath \"\""
Nov 29 05:06:56 crc kubenswrapper[4631]: I1129 05:06:56.255505 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-kube-api-access-nzdpg" (OuterVolumeSpecName: "kube-api-access-nzdpg") pod "dcdedb20-6a4a-483d-a970-3c6b4bb45ee1" (UID: "dcdedb20-6a4a-483d-a970-3c6b4bb45ee1"). InnerVolumeSpecName "kube-api-access-nzdpg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 29 05:06:56 crc kubenswrapper[4631]: I1129 05:06:56.352848 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzdpg\" (UniqueName: \"kubernetes.io/projected/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1-kube-api-access-nzdpg\") on node \"crc\" DevicePath \"\""
Nov 29 05:06:57 crc kubenswrapper[4631]: I1129 05:06:57.031322 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbe04a5b1af5e35f42ca6747fa66d546a61524a8f27c54b8528ca85641f7ac48"
Nov 29 05:06:57 crc kubenswrapper[4631]: I1129 05:06:57.031435 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/crc-debug-lcssg"
Nov 29 05:06:57 crc kubenswrapper[4631]: I1129 05:06:57.225443 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcdedb20-6a4a-483d-a970-3c6b4bb45ee1" path="/var/lib/kubelet/pods/dcdedb20-6a4a-483d-a970-3c6b4bb45ee1/volumes"
Nov 29 05:07:05 crc kubenswrapper[4631]: I1129 05:07:05.216110 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:07:05 crc kubenswrapper[4631]: E1129 05:07:05.216810 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:07:12 crc kubenswrapper[4631]: I1129 05:07:12.818028 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-fb967fbcd-pqplm_f7aced22-8f95-4c19-b6c6-f56a84ae29e0/barbican-api/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.010400 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5555f74b94-58bwl_f3ff1d2b-0fc8-49dc-a02f-948b81d54988/barbican-keystone-listener/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.080231 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-fb967fbcd-pqplm_f7aced22-8f95-4c19-b6c6-f56a84ae29e0/barbican-api-log/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.243573 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5555f74b94-58bwl_f3ff1d2b-0fc8-49dc-a02f-948b81d54988/barbican-keystone-listener-log/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.409414 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-777b8bd98c-blh5p_e5880470-8751-4613-82c7-33efabd35a6e/barbican-worker/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.428231 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-777b8bd98c-blh5p_e5880470-8751-4613-82c7-33efabd35a6e/barbican-worker-log/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.540922 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj_c7c0f56e-3925-47a4-9516-9c9d662540db/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.687044 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ad1ef30-44b5-455b-86a8-136862164eba/ceilometer-central-agent/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.721148 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ad1ef30-44b5-455b-86a8-136862164eba/ceilometer-notification-agent/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.795554 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ad1ef30-44b5-455b-86a8-136862164eba/proxy-httpd/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.858979 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ad1ef30-44b5-455b-86a8-136862164eba/sg-core/0.log"
Nov 29 05:07:13 crc kubenswrapper[4631]: I1129 05:07:13.989806 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_4bb283dc-d495-4398-a0d1-da97df47ffbd/cinder-api/0.log"
Nov 29 05:07:14 crc kubenswrapper[4631]: I1129 05:07:14.092784 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_4bb283dc-d495-4398-a0d1-da97df47ffbd/cinder-api-log/0.log"
Nov 29 05:07:14 crc kubenswrapper[4631]: I1129 05:07:14.147898 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_33fff273-753a-43d7-ad1d-3d8dd9d3f373/cinder-scheduler/0.log"
Nov 29 05:07:14 crc kubenswrapper[4631]: I1129 05:07:14.254369 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_33fff273-753a-43d7-ad1d-3d8dd9d3f373/probe/0.log"
Nov 29 05:07:14 crc kubenswrapper[4631]: I1129 05:07:14.422149 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-26cwd_6cc48245-72ab-4e25-91b3-c98fe56e9869/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:14 crc kubenswrapper[4631]: I1129 05:07:14.459212 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5_12e84ebd-9825-4ef2-9356-626fdc73dbb8/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:14 crc kubenswrapper[4631]: I1129 05:07:14.657296 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-9mfkk_9332abff-609f-424f-8f3c-b72b461489db/init/0.log"
Nov 29 05:07:14 crc kubenswrapper[4631]: I1129 05:07:14.840997 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-9mfkk_9332abff-609f-424f-8f3c-b72b461489db/init/0.log"
Nov 29 05:07:14 crc kubenswrapper[4631]: I1129 05:07:14.884945 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj_919eea43-11e7-42f0-8d23-e46e7cbc5359/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:14 crc kubenswrapper[4631]: I1129 05:07:14.933594 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-9mfkk_9332abff-609f-424f-8f3c-b72b461489db/dnsmasq-dns/0.log"
Nov 29 05:07:15 crc kubenswrapper[4631]: I1129 05:07:15.098830 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0b904c4b-913c-4fc2-8037-94300918d367/glance-httpd/0.log"
Nov 29 05:07:15 crc kubenswrapper[4631]: I1129 05:07:15.153116 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0b904c4b-913c-4fc2-8037-94300918d367/glance-log/0.log"
Nov 29 05:07:15 crc kubenswrapper[4631]: I1129 05:07:15.339669 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a356aef9-8e14-4c39-92b9-d32402e357ad/glance-httpd/0.log"
Nov 29 05:07:15 crc kubenswrapper[4631]: I1129 05:07:15.390626 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a356aef9-8e14-4c39-92b9-d32402e357ad/glance-log/0.log"
Nov 29 05:07:15 crc kubenswrapper[4631]: I1129 05:07:15.621527 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5964d597b6-rfcr2_e2a6410f-6c69-4b87-a247-b285aef98b71/horizon/1.log"
Nov 29 05:07:15 crc kubenswrapper[4631]: I1129 05:07:15.688512 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5964d597b6-rfcr2_e2a6410f-6c69-4b87-a247-b285aef98b71/horizon/0.log"
Nov 29 05:07:15 crc kubenswrapper[4631]: I1129 05:07:15.884982 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5964d597b6-rfcr2_e2a6410f-6c69-4b87-a247-b285aef98b71/horizon-log/0.log"
Nov 29 05:07:15 crc kubenswrapper[4631]: I1129 05:07:15.938701 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2_d39ef35e-1420-489c-9637-c89eb39ba398/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:16 crc kubenswrapper[4631]: I1129 05:07:16.085139 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-6jptz_cf5098d8-d84f-4749-87ad-6772a3ac8b4e/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:16 crc kubenswrapper[4631]: I1129 05:07:16.405886 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29406541-z5vp6_1f61da44-0694-4d79-b156-8ed5d0358a3f/keystone-cron/0.log"
Nov 29 05:07:16 crc kubenswrapper[4631]: I1129 05:07:16.437313 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6ff47d4689-gnj7t_7771bd0b-e533-499d-9b8a-9071eb930e26/keystone-api/0.log"
Nov 29 05:07:16 crc kubenswrapper[4631]: I1129 05:07:16.666820 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_c4c8b52f-72c8-4aac-9c57-df83ec5dfe20/kube-state-metrics/0.log"
Nov 29 05:07:16 crc kubenswrapper[4631]: I1129 05:07:16.706130 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-vr72m_4c75d7a8-5a02-4b4b-8af4-e83e594a096f/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:17 crc kubenswrapper[4631]: I1129 05:07:17.042358 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7d7446b849-nsq65_7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9/neutron-api/0.log"
Nov 29 05:07:17 crc kubenswrapper[4631]: I1129 05:07:17.095832 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7d7446b849-nsq65_7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9/neutron-httpd/0.log"
Nov 29 05:07:17 crc kubenswrapper[4631]: I1129 05:07:17.543523 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8_62539716-e710-4274-a860-22590e2d5861/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:17 crc kubenswrapper[4631]: I1129 05:07:17.959549 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_52e9bc32-6412-4929-be28-61ac7021c100/nova-api-log/0.log"
Nov 29 05:07:18 crc kubenswrapper[4631]: I1129 05:07:18.004248 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_52e9bc32-6412-4929-be28-61ac7021c100/nova-api-api/0.log"
Nov 29 05:07:18 crc kubenswrapper[4631]: I1129 05:07:18.009965 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_2c6f9871-305a-473f-8610-475ad792012a/nova-cell0-conductor-conductor/0.log"
Nov 29 05:07:18 crc kubenswrapper[4631]: I1129 05:07:18.216539 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6"
Nov 29 05:07:18 crc kubenswrapper[4631]: E1129 05:07:18.216782 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9"
Nov 29 05:07:18 crc kubenswrapper[4631]: I1129 05:07:18.273481 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_b334d5ab-4b3b-436f-bf43-54c2a6a511b0/nova-cell1-conductor-conductor/0.log"
Nov 29 05:07:18 crc kubenswrapper[4631]: I1129 05:07:18.378650 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_462ee7da-139d-4cd7-91c2-6bb6b02f9b57/nova-cell1-novncproxy-novncproxy/0.log"
Nov 29 05:07:18 crc kubenswrapper[4631]: I1129 05:07:18.596287 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-42rzz_4dc57e41-ec49-4fda-86a7-2d339d19003b/nova-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:18 crc kubenswrapper[4631]: I1129 05:07:18.851208 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_a5610ba4-568f-4281-90cd-7b4a187a9884/nova-metadata-log/0.log"
Nov 29 05:07:19 crc kubenswrapper[4631]: I1129 05:07:19.116315 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_6926c23d-5598-420e-b328-c95ffe4d1475/nova-scheduler-scheduler/0.log"
Nov 29 05:07:19 crc kubenswrapper[4631]: I1129 05:07:19.266771 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c9989c8e-3a12-49c9-89e0-d13778a4c3d4/mysql-bootstrap/0.log"
Nov 29 05:07:19 crc kubenswrapper[4631]: I1129 05:07:19.478294 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c9989c8e-3a12-49c9-89e0-d13778a4c3d4/galera/0.log"
Nov 29 05:07:19 crc kubenswrapper[4631]: I1129 05:07:19.553243 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c9989c8e-3a12-49c9-89e0-d13778a4c3d4/mysql-bootstrap/0.log"
Nov 29 05:07:19 crc kubenswrapper[4631]: I1129 05:07:19.750787 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_2197f066-a879-4131-9e49-4d188a01db93/mysql-bootstrap/0.log"
Nov 29 05:07:19 crc kubenswrapper[4631]: I1129 05:07:19.925242 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_2197f066-a879-4131-9e49-4d188a01db93/mysql-bootstrap/0.log"
Nov 29 05:07:19 crc kubenswrapper[4631]: I1129 05:07:19.934457 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_a5610ba4-568f-4281-90cd-7b4a187a9884/nova-metadata-metadata/0.log"
Nov 29 05:07:19 crc kubenswrapper[4631]: I1129 05:07:19.999181 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_2197f066-a879-4131-9e49-4d188a01db93/galera/0.log"
Nov 29 05:07:20 crc kubenswrapper[4631]: I1129 05:07:20.458790 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_55accadf-0ac2-4a6e-a640-6b47845f939f/openstackclient/0.log"
Nov 29 05:07:20 crc kubenswrapper[4631]: I1129 05:07:20.637081 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-fc5cp_9dac72cc-94dd-4863-92c6-99296142fafb/ovn-controller/0.log"
Nov 29 05:07:20 crc kubenswrapper[4631]: I1129 05:07:20.736446 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wc5tp_3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf/openstack-network-exporter/0.log"
Nov 29 05:07:21 crc kubenswrapper[4631]: I1129 05:07:21.004359 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kl2kj_1334e52e-4dbd-4c2d-bd05-d19f59ef722b/ovsdb-server-init/0.log"
Nov 29 05:07:21 crc kubenswrapper[4631]: I1129 05:07:21.159664 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kl2kj_1334e52e-4dbd-4c2d-bd05-d19f59ef722b/ovsdb-server-init/0.log"
Nov 29 05:07:21 crc kubenswrapper[4631]: I1129 05:07:21.193420 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kl2kj_1334e52e-4dbd-4c2d-bd05-d19f59ef722b/ovs-vswitchd/0.log"
Nov 29 05:07:21 crc kubenswrapper[4631]: I1129 05:07:21.205942 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kl2kj_1334e52e-4dbd-4c2d-bd05-d19f59ef722b/ovsdb-server/0.log"
Nov 29 05:07:21 crc kubenswrapper[4631]: I1129 05:07:21.427759 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-msq8v_1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:21 crc kubenswrapper[4631]: I1129 05:07:21.459627 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7108e5ce-c50c-44e2-971f-9a22a4370b52/openstack-network-exporter/0.log"
Nov 29 05:07:21 crc kubenswrapper[4631]: I1129 05:07:21.597863 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7108e5ce-c50c-44e2-971f-9a22a4370b52/ovn-northd/0.log"
Nov 29 05:07:21 crc kubenswrapper[4631]: I1129 05:07:21.720315 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5bd78ee0-c12e-4d6b-a47d-3652c3150c8d/openstack-network-exporter/0.log"
Nov 29 05:07:21 crc kubenswrapper[4631]: I1129 05:07:21.825619 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5bd78ee0-c12e-4d6b-a47d-3652c3150c8d/ovsdbserver-nb/0.log"
Nov 29 05:07:22 crc kubenswrapper[4631]: I1129 05:07:22.000311 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_c80339e5-63b2-451d-a7fb-25ef7a2fba6a/openstack-network-exporter/0.log"
Nov 29 05:07:22 crc kubenswrapper[4631]: I1129 05:07:22.050541 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_c80339e5-63b2-451d-a7fb-25ef7a2fba6a/ovsdbserver-sb/0.log"
Nov 29 05:07:22 crc kubenswrapper[4631]: I1129 05:07:22.250663 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6c8fbfb7d4-6m5ww_1d801d33-e580-4849-ab8b-6f2a21118b1f/placement-api/0.log"
Nov 29 05:07:22 crc kubenswrapper[4631]: I1129 05:07:22.343184 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6c8fbfb7d4-6m5ww_1d801d33-e580-4849-ab8b-6f2a21118b1f/placement-log/0.log"
Nov 29 05:07:22 crc kubenswrapper[4631]: I1129 05:07:22.423781 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_65e517ae-4586-44d4-b7d7-0f8f3f23e11f/setup-container/0.log"
Nov 29 05:07:22 crc kubenswrapper[4631]: I1129 05:07:22.622146 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_65e517ae-4586-44d4-b7d7-0f8f3f23e11f/setup-container/0.log"
Nov 29 05:07:22 crc kubenswrapper[4631]: I1129 05:07:22.710434 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_65e517ae-4586-44d4-b7d7-0f8f3f23e11f/rabbitmq/0.log"
Nov 29 05:07:22 crc kubenswrapper[4631]: I1129 05:07:22.740929 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_58d4504e-5552-4a9b-8ef8-6442b51cccf1/setup-container/0.log"
Nov 29 05:07:22 crc kubenswrapper[4631]: I1129 05:07:22.992386 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_58d4504e-5552-4a9b-8ef8-6442b51cccf1/setup-container/0.log"
Nov 29 05:07:23 crc kubenswrapper[4631]: I1129 05:07:23.014780 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_58d4504e-5552-4a9b-8ef8-6442b51cccf1/rabbitmq/0.log"
Nov 29 05:07:23 crc kubenswrapper[4631]: I1129 05:07:23.081135 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86_6b0158a9-97f1-46c7-a984-b1e6876d4b57/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:23 crc kubenswrapper[4631]: I1129 05:07:23.257823 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-c777z_b9f1d0ab-6519-4eb8-aacc-539fb0e6433c/redhat-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:23 crc kubenswrapper[4631]: I1129 05:07:23.422154 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d_811129e9-11e0-4619-b4ca-1779ce7ae461/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:23 crc kubenswrapper[4631]: I1129 05:07:23.674734 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-r2trh_25a7e95d-1575-46e8-9ab3-a21aa8f08b3d/ssh-known-hosts-edpm-deployment/0.log"
Nov 29 05:07:23 crc kubenswrapper[4631]: I1129 05:07:23.702398 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-gcswr_f32f1eab-b255-4518-ba67-d9a46362d4f7/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 29 05:07:23 crc kubenswrapper[4631]: I1129 05:07:23.956269 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-76cbc8bc95-pd9d4_dfe18059-91e5-40e0-a1df-f5f56cf4c0d2/proxy-server/0.log"
Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.062867 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-76cbc8bc95-pd9d4_dfe18059-91e5-40e0-a1df-f5f56cf4c0d2/proxy-httpd/0.log"
Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.137812 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-s8gk2_cfd7f275-e7d1-4239-b55a-b0566664e6bf/swift-ring-rebalance/0.log"
Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.326706 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/account-auditor/0.log"
Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.336613 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/account-reaper/0.log"
Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.427055 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/account-replicator/0.log"
Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.519864 4631 log.go:25] "Finished
parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/container-auditor/0.log" Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.599950 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/account-server/0.log" Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.655545 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/container-replicator/0.log" Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.755872 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/container-server/0.log" Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.810519 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/container-updater/0.log" Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.942823 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-expirer/0.log" Nov 29 05:07:24 crc kubenswrapper[4631]: I1129 05:07:24.987375 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-auditor/0.log" Nov 29 05:07:25 crc kubenswrapper[4631]: I1129 05:07:25.027594 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-replicator/0.log" Nov 29 05:07:25 crc kubenswrapper[4631]: I1129 05:07:25.055695 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-server/0.log" Nov 29 05:07:25 crc kubenswrapper[4631]: I1129 05:07:25.154673 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-updater/0.log" Nov 29 05:07:25 crc kubenswrapper[4631]: I1129 05:07:25.261562 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/rsync/0.log" Nov 29 05:07:25 crc kubenswrapper[4631]: I1129 05:07:25.410204 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/swift-recon-cron/0.log" Nov 29 05:07:25 crc kubenswrapper[4631]: I1129 05:07:25.568910 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc_38877ce3-8e06-44be-9be6-4abb374c32fa/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:07:25 crc kubenswrapper[4631]: I1129 05:07:25.734784 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb/tempest-tests-tempest-tests-runner/0.log" Nov 29 05:07:25 crc kubenswrapper[4631]: I1129 05:07:25.859924 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_446545e5-fff2-4f7e-960c-26bf33c121bc/test-operator-logs-container/0.log" Nov 29 05:07:26 crc kubenswrapper[4631]: I1129 05:07:26.015805 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn_b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:07:30 
crc kubenswrapper[4631]: I1129 05:07:30.216977 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:07:30 crc kubenswrapper[4631]: E1129 05:07:30.217616 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:07:32 crc kubenswrapper[4631]: I1129 05:07:32.860722 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_a706cd5e-48d4-44a0-b8f5-b97ac5e39a16/memcached/0.log" Nov 29 05:07:44 crc kubenswrapper[4631]: I1129 05:07:44.217171 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:07:44 crc kubenswrapper[4631]: E1129 05:07:44.217997 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:07:53 crc kubenswrapper[4631]: I1129 05:07:53.225102 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/util/0.log" Nov 29 05:07:53 crc kubenswrapper[4631]: I1129 05:07:53.452517 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/pull/0.log" Nov 29 05:07:53 crc kubenswrapper[4631]: I1129 05:07:53.636398 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/util/0.log" Nov 29 05:07:53 crc kubenswrapper[4631]: I1129 05:07:53.676921 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/pull/0.log" Nov 29 05:07:53 crc kubenswrapper[4631]: I1129 05:07:53.829259 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/pull/0.log" Nov 29 05:07:53 crc kubenswrapper[4631]: I1129 05:07:53.842022 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/util/0.log" Nov 29 05:07:53 crc kubenswrapper[4631]: I1129 05:07:53.867502 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/extract/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.028919 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-dp4b9_51f70cd4-a679-426f-9467-1702bb980ada/kube-rbac-proxy/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.069710 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-dp4b9_51f70cd4-a679-426f-9467-1702bb980ada/manager/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.137759 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-q4tvc_75e40b24-8291-44fe-bd37-97d493e2c136/kube-rbac-proxy/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.244228 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-q4tvc_75e40b24-8291-44fe-bd37-97d493e2c136/manager/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.333442 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-kzfsd_fc3a4db2-6980-4bc4-aa20-8340eecc513e/kube-rbac-proxy/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.366216 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-kzfsd_fc3a4db2-6980-4bc4-aa20-8340eecc513e/manager/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.478425 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-md78v_7214fe12-0140-464c-a856-b1b5482bb635/kube-rbac-proxy/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.571074 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-md78v_7214fe12-0140-464c-a856-b1b5482bb635/manager/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.717251 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-h8x8k_5fc71d02-38a2-4998-8cab-e334a10fcd5c/manager/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.719752 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-h8x8k_5fc71d02-38a2-4998-8cab-e334a10fcd5c/kube-rbac-proxy/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.798979 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-5jzhg_1ec069eb-26b3-408c-a4ba-118d01436ecd/kube-rbac-proxy/0.log" Nov 29 05:07:54 crc kubenswrapper[4631]: I1129 05:07:54.926581 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-5jzhg_1ec069eb-26b3-408c-a4ba-118d01436ecd/manager/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.042251 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-zhzc9_7e78b781-84b7-4915-837a-ed1a45d1201e/kube-rbac-proxy/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.151705 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-zhzc9_7e78b781-84b7-4915-837a-ed1a45d1201e/manager/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.206257 4631 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-qp8p2_53a59934-39b4-4b0b-bf3d-da06f41ccf7f/kube-rbac-proxy/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.217905 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:07:55 crc kubenswrapper[4631]: E1129 05:07:55.218182 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.272035 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-qp8p2_53a59934-39b4-4b0b-bf3d-da06f41ccf7f/manager/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.435678 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-6c69d4788d-4q485_a0087618-94aa-4b5f-a590-9e976a84cbbf/kube-rbac-proxy/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.453663 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-6c69d4788d-4q485_a0087618-94aa-4b5f-a590-9e976a84cbbf/manager/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.612955 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-96kdd_46dcc222-f54d-4ddd-bc12-71fd2cfc989c/kube-rbac-proxy/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.652279 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-96kdd_46dcc222-f54d-4ddd-bc12-71fd2cfc989c/manager/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.767371 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-plz68_f4f6e611-da9a-42cb-99f8-59b9784b2671/kube-rbac-proxy/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.813069 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-lp8wb_9429868a-7e85-4c45-a3ff-e05af34c9854/kube-rbac-proxy/0.log" Nov 29 05:07:55 crc kubenswrapper[4631]: I1129 05:07:55.827699 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-plz68_f4f6e611-da9a-42cb-99f8-59b9784b2671/manager/0.log" Nov 29 05:07:56 crc kubenswrapper[4631]: I1129 05:07:56.025569 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-lp8wb_9429868a-7e85-4c45-a3ff-e05af34c9854/manager/0.log" Nov 29 05:07:56 crc kubenswrapper[4631]: I1129 05:07:56.126212 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-6mrwx_01ca8f91-4e45-4bb2-a44f-a17d6701e529/kube-rbac-proxy/0.log" Nov 29 05:07:56 crc kubenswrapper[4631]: I1129 05:07:56.201633 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-6mrwx_01ca8f91-4e45-4bb2-a44f-a17d6701e529/manager/0.log" Nov 29 05:07:56 crc kubenswrapper[4631]: I1129 05:07:56.353785 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-qm568_f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb/kube-rbac-proxy/0.log" Nov 29 05:07:56 crc kubenswrapper[4631]: I1129 05:07:56.353952 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-qm568_f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb/manager/0.log" Nov 29 05:07:56 crc kubenswrapper[4631]: I1129 05:07:56.557728 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf_072818bb-f7b6-4dbc-9885-a3a8c68f9494/kube-rbac-proxy/0.log" Nov 29 05:07:56 crc kubenswrapper[4631]: I1129 05:07:56.566979 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf_072818bb-f7b6-4dbc-9885-a3a8c68f9494/manager/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.040048 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-4wzxd_d04d138a-44a6-4666-aa58-8a225d975235/registry-server/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.040655 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5478ff79b4-nggr2_d0f57542-df49-4195-91b5-1fc784cba518/operator/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.461192 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-j9s5g_82eecaa9-4289-4d37-b953-7c2de1f5a437/manager/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.479367 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-j9s5g_82eecaa9-4289-4d37-b953-7c2de1f5a437/kube-rbac-proxy/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.550453 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-5xmrz_9812178e-08d5-487d-b42e-1edcca79850b/kube-rbac-proxy/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.732093 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-79cbf6968-9cwcq_cb8f6e48-60ac-497b-ab0a-8d556f77a1ce/manager/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.746807 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-97pk4_99a5846d-1348-421d-9637-cbd86e552f1c/operator/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.764713 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-5xmrz_9812178e-08d5-487d-b42e-1edcca79850b/manager/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.903585 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-rgxmh_7956b653-1bf2-4bec-8246-0a806ef0716d/kube-rbac-proxy/0.log" Nov 29 05:07:57 crc kubenswrapper[4631]: I1129 05:07:57.928978 4631 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-rgxmh_7956b653-1bf2-4bec-8246-0a806ef0716d/manager/0.log" Nov 29 05:07:58 crc kubenswrapper[4631]: I1129 05:07:58.031493 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-spqmt_d1f8729d-3838-42f0-9185-6b4edb74a90f/manager/0.log" Nov 29 05:07:58 crc kubenswrapper[4631]: I1129 05:07:58.051721 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-spqmt_d1f8729d-3838-42f0-9185-6b4edb74a90f/kube-rbac-proxy/0.log" Nov 29 05:07:58 crc kubenswrapper[4631]: I1129 05:07:58.179810 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-7kqbn_fa9bc4b5-9bea-48a5-8d01-1f2cd1957133/kube-rbac-proxy/0.log" Nov 29 05:07:58 crc kubenswrapper[4631]: I1129 05:07:58.286701 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-7kqbn_fa9bc4b5-9bea-48a5-8d01-1f2cd1957133/manager/0.log" Nov 29 05:07:58 crc kubenswrapper[4631]: I1129 05:07:58.288719 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-mns7w_ae831fd7-f5a8-4427-a3c7-64ae0a86281f/kube-rbac-proxy/0.log" Nov 29 05:07:58 crc kubenswrapper[4631]: I1129 05:07:58.370074 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-mns7w_ae831fd7-f5a8-4427-a3c7-64ae0a86281f/manager/0.log" Nov 29 05:08:07 crc kubenswrapper[4631]: I1129 05:08:07.216303 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:08:07 crc kubenswrapper[4631]: E1129 05:08:07.216962 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:08:17 crc kubenswrapper[4631]: I1129 05:08:17.578559 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-x8t8v_8361abea-c6bc-4927-a88b-c8318096d60d/control-plane-machine-set-operator/0.log" Nov 29 05:08:17 crc kubenswrapper[4631]: I1129 05:08:17.753156 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-txv66_5e9aad10-398a-479a-b828-100682ad67c7/machine-api-operator/0.log" Nov 29 05:08:17 crc kubenswrapper[4631]: I1129 05:08:17.753248 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-txv66_5e9aad10-398a-479a-b828-100682ad67c7/kube-rbac-proxy/0.log" Nov 29 05:08:20 crc kubenswrapper[4631]: I1129 05:08:20.217001 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:08:20 crc kubenswrapper[4631]: E1129 05:08:20.218094 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:08:30 crc kubenswrapper[4631]: I1129 05:08:30.693897 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-5gqxn_82882099-8669-4d95-a03f-7da7a69b3865/cert-manager-controller/0.log" Nov 29 05:08:30 crc kubenswrapper[4631]: I1129 05:08:30.823731 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-n4mtg_9f4942a9-5091-4ae9-b7ba-9e9aa329161f/cert-manager-cainjector/0.log" Nov 29 05:08:30 crc kubenswrapper[4631]: I1129 05:08:30.953158 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-66b6l_1cff1f89-8342-4b11-98a6-c6d2cb2bed76/cert-manager-webhook/0.log" Nov 29 05:08:35 crc kubenswrapper[4631]: I1129 05:08:35.216428 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:08:35 crc kubenswrapper[4631]: I1129 05:08:35.991539 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"b6c56a99240518977d61f2c7d5677497c6ecb3a6e851512ce81f757de601d59d"} Nov 29 05:08:43 crc kubenswrapper[4631]: I1129 05:08:43.938270 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-tlptj_09679895-91f4-4a46-a9cb-03bb2bd32537/nmstate-console-plugin/0.log" Nov 29 05:08:44 crc kubenswrapper[4631]: I1129 05:08:44.070081 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-dmhbd_fbd8e3f0-5167-4724-8f85-8648acdb3f6b/nmstate-handler/0.log" Nov 29 05:08:44 crc kubenswrapper[4631]: I1129 05:08:44.193141 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-ncfds_f1476f66-a509-4f8a-937f-b7b5e906a7e2/kube-rbac-proxy/0.log" Nov 29 05:08:44 crc kubenswrapper[4631]: I1129 05:08:44.237383 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-ncfds_f1476f66-a509-4f8a-937f-b7b5e906a7e2/nmstate-metrics/0.log" Nov 29 05:08:44 crc kubenswrapper[4631]: I1129 05:08:44.364050 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-jrhjf_7183e04c-6dbe-4139-8bd2-a217adae2ab6/nmstate-operator/0.log" Nov 29 05:08:44 crc kubenswrapper[4631]: I1129 05:08:44.448004 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-xjsng_a232adff-6fb5-4120-8f41-8bf310322024/nmstate-webhook/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.084175 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-nbzls_40e9d6d5-fd7e-4962-aace-e0fe711eb77d/kube-rbac-proxy/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.148353 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-nbzls_40e9d6d5-fd7e-4962-aace-e0fe711eb77d/controller/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.377976 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-ktl8z_7d0c7592-e989-41cd-b1bb-4ec52e694973/frr-k8s-webhook-server/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.420519 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-frr-files/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.689491 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-reloader/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.731619 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-frr-files/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.731662 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-reloader/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.793790 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-metrics/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.935131 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-frr-files/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.936763 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-reloader/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.980930 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-metrics/0.log" Nov 29 05:08:59 crc kubenswrapper[4631]: I1129 05:08:59.985431 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-metrics/0.log" Nov 29 05:09:00 crc kubenswrapper[4631]: I1129 05:09:00.141396 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-metrics/0.log" Nov 29 05:09:00 crc kubenswrapper[4631]: I1129 05:09:00.250247 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-frr-files/0.log" Nov 29 05:09:00 crc kubenswrapper[4631]: I1129 05:09:00.264656 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-reloader/0.log" Nov 29 05:09:00 crc kubenswrapper[4631]: I1129 05:09:00.292027 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/controller/0.log" Nov 29 05:09:00 crc kubenswrapper[4631]: I1129 05:09:00.442293 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/kube-rbac-proxy/0.log" Nov 29 05:09:00 crc kubenswrapper[4631]: I1129 05:09:00.510881 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/frr-metrics/0.log" Nov 29 05:09:00 crc kubenswrapper[4631]: I1129 05:09:00.526870 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/kube-rbac-proxy-frr/0.log" Nov 29 
05:09:00 crc kubenswrapper[4631]: I1129 05:09:00.694870 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/reloader/0.log" Nov 29 05:09:00 crc kubenswrapper[4631]: I1129 05:09:00.822449 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-86d54cd4bb-mfrnm_b9fde2d9-7731-458b-9e00-216a755d629c/manager/0.log" Nov 29 05:09:01 crc kubenswrapper[4631]: I1129 05:09:01.120550 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-8598fb747-q99bx_e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab/webhook-server/0.log" Nov 29 05:09:01 crc kubenswrapper[4631]: I1129 05:09:01.242213 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-rjtb7_b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d/kube-rbac-proxy/0.log" Nov 29 05:09:01 crc kubenswrapper[4631]: I1129 05:09:01.819080 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/frr/0.log" Nov 29 05:09:01 crc kubenswrapper[4631]: I1129 05:09:01.832103 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-rjtb7_b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d/speaker/0.log" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.382491 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/util/0.log" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.626317 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/util/0.log" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.655140 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/pull/0.log" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.676943 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/pull/0.log" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.779173 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sphfk"] Nov 29 05:09:15 crc kubenswrapper[4631]: E1129 05:09:15.784831 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcdedb20-6a4a-483d-a970-3c6b4bb45ee1" containerName="container-00" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.784862 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcdedb20-6a4a-483d-a970-3c6b4bb45ee1" containerName="container-00" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.785534 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcdedb20-6a4a-483d-a970-3c6b4bb45ee1" containerName="container-00" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.787956 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.800446 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sphfk"] Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.874655 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-catalog-content\") pod \"redhat-marketplace-sphfk\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.874729 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-utilities\") pod \"redhat-marketplace-sphfk\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.874866 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t42rm\" (UniqueName: \"kubernetes.io/projected/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-kube-api-access-t42rm\") pod \"redhat-marketplace-sphfk\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.977292 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t42rm\" (UniqueName: \"kubernetes.io/projected/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-kube-api-access-t42rm\") pod \"redhat-marketplace-sphfk\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.977526 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-catalog-content\") pod \"redhat-marketplace-sphfk\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.977555 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-utilities\") pod \"redhat-marketplace-sphfk\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.978090 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-utilities\") pod \"redhat-marketplace-sphfk\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:15 crc kubenswrapper[4631]: I1129 05:09:15.978744 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-catalog-content\") pod \"redhat-marketplace-sphfk\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.004442 4631 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-t42rm\" (UniqueName: \"kubernetes.io/projected/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-kube-api-access-t42rm\") pod \"redhat-marketplace-sphfk\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.084513 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/extract/0.log" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.117244 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/util/0.log" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.122746 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.136681 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/pull/0.log" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.393275 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/util/0.log" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.622983 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sphfk"] Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.644307 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/util/0.log" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.645046 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/pull/0.log" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.659049 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/pull/0.log" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.848025 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/util/0.log" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.893378 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/extract/0.log" Nov 29 05:09:16 crc kubenswrapper[4631]: I1129 05:09:16.929779 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/pull/0.log" Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.042267 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-utilities/0.log" Nov 29 05:09:17 crc kubenswrapper[4631]: 
I1129 05:09:17.239855 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-content/0.log" Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.239962 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-utilities/0.log" Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.313393 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-content/0.log" Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.419090 4631 generic.go:334] "Generic (PLEG): container finished" podID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerID="2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12" exitCode=0 Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.419142 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sphfk" event={"ID":"dc926027-25fa-4fad-9d05-cfb2eb3d0b51","Type":"ContainerDied","Data":"2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12"} Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.419169 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sphfk" event={"ID":"dc926027-25fa-4fad-9d05-cfb2eb3d0b51","Type":"ContainerStarted","Data":"1c3c74858d7e251c6dcadcf7b43615c949bc8b65aa3830f9c298a118f98889fd"} Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.424889 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.484404 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-utilities/0.log" Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.493056 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-content/0.log" Nov 29 05:09:17 crc kubenswrapper[4631]: I1129 05:09:17.694354 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-utilities/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.026139 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-content/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.092381 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-utilities/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.094357 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-content/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.178949 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/registry-server/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.271846 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-utilities/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.308283 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-content/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.428356 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sphfk" event={"ID":"dc926027-25fa-4fad-9d05-cfb2eb3d0b51","Type":"ContainerStarted","Data":"c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da"} Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.568264 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/registry-server/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.588365 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-94k48_15665c92-a9d7-4bf9-807a-9f80ce56d8ac/marketplace-operator/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.700751 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-utilities/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.831743 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-utilities/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.877887 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-content/0.log" Nov 29 05:09:18 crc kubenswrapper[4631]: I1129 05:09:18.922217 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-content/0.log" Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.058074 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-content/0.log" Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.083282 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-utilities/0.log" Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.213054 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/registry-server/0.log" Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.337849 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-utilities/0.log" Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.439552 4631 generic.go:334] "Generic (PLEG): container finished" podID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerID="c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da" exitCode=0 Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.439599 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sphfk" 
event={"ID":"dc926027-25fa-4fad-9d05-cfb2eb3d0b51","Type":"ContainerDied","Data":"c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da"} Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.514193 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-content/0.log" Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.541602 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-content/0.log" Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.571083 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-utilities/0.log" Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.738388 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-content/0.log" Nov 29 05:09:19 crc kubenswrapper[4631]: I1129 05:09:19.769481 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-utilities/0.log" Nov 29 05:09:20 crc kubenswrapper[4631]: I1129 05:09:20.208581 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/registry-server/0.log" Nov 29 05:09:20 crc kubenswrapper[4631]: I1129 05:09:20.448990 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sphfk" event={"ID":"dc926027-25fa-4fad-9d05-cfb2eb3d0b51","Type":"ContainerStarted","Data":"aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5"} Nov 29 05:09:20 crc kubenswrapper[4631]: I1129 05:09:20.474074 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sphfk" podStartSLOduration=2.8945811949999998 podStartE2EDuration="5.474054875s" podCreationTimestamp="2025-11-29 05:09:15 +0000 UTC" firstStartedPulling="2025-11-29 05:09:17.423115054 +0000 UTC m=+3484.487618568" lastFinishedPulling="2025-11-29 05:09:20.002588734 +0000 UTC m=+3487.067092248" observedRunningTime="2025-11-29 05:09:20.467924164 +0000 UTC m=+3487.532427698" watchObservedRunningTime="2025-11-29 05:09:20.474054875 +0000 UTC m=+3487.538558399" Nov 29 05:09:26 crc kubenswrapper[4631]: I1129 05:09:26.122962 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:26 crc kubenswrapper[4631]: I1129 05:09:26.123645 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:26 crc kubenswrapper[4631]: I1129 05:09:26.184578 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:26 crc kubenswrapper[4631]: I1129 05:09:26.582607 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:26 crc kubenswrapper[4631]: I1129 05:09:26.636064 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sphfk"] Nov 29 05:09:28 crc kubenswrapper[4631]: I1129 05:09:28.527848 4631 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openshift-marketplace/redhat-marketplace-sphfk" podUID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerName="registry-server" containerID="cri-o://aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5" gracePeriod=2 Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.050055 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.158995 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t42rm\" (UniqueName: \"kubernetes.io/projected/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-kube-api-access-t42rm\") pod \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.159210 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-catalog-content\") pod \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.159441 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-utilities\") pod \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\" (UID: \"dc926027-25fa-4fad-9d05-cfb2eb3d0b51\") " Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.160229 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-utilities" (OuterVolumeSpecName: "utilities") pod "dc926027-25fa-4fad-9d05-cfb2eb3d0b51" (UID: "dc926027-25fa-4fad-9d05-cfb2eb3d0b51"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.160997 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.165626 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-kube-api-access-t42rm" (OuterVolumeSpecName: "kube-api-access-t42rm") pod "dc926027-25fa-4fad-9d05-cfb2eb3d0b51" (UID: "dc926027-25fa-4fad-9d05-cfb2eb3d0b51"). InnerVolumeSpecName "kube-api-access-t42rm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.176677 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc926027-25fa-4fad-9d05-cfb2eb3d0b51" (UID: "dc926027-25fa-4fad-9d05-cfb2eb3d0b51"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.263134 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t42rm\" (UniqueName: \"kubernetes.io/projected/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-kube-api-access-t42rm\") on node \"crc\" DevicePath \"\"" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.263177 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc926027-25fa-4fad-9d05-cfb2eb3d0b51-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.541453 4631 generic.go:334] "Generic (PLEG): container finished" podID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerID="aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5" exitCode=0 Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.541666 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sphfk" event={"ID":"dc926027-25fa-4fad-9d05-cfb2eb3d0b51","Type":"ContainerDied","Data":"aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5"} Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.541810 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sphfk" event={"ID":"dc926027-25fa-4fad-9d05-cfb2eb3d0b51","Type":"ContainerDied","Data":"1c3c74858d7e251c6dcadcf7b43615c949bc8b65aa3830f9c298a118f98889fd"} Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.541847 4631 scope.go:117] "RemoveContainer" containerID="aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.541751 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sphfk" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.578707 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sphfk"] Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.592266 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sphfk"] Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.597220 4631 scope.go:117] "RemoveContainer" containerID="c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.624678 4631 scope.go:117] "RemoveContainer" containerID="2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.667088 4631 scope.go:117] "RemoveContainer" containerID="aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5" Nov 29 05:09:29 crc kubenswrapper[4631]: E1129 05:09:29.667510 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5\": container with ID starting with aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5 not found: ID does not exist" containerID="aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.667537 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5"} err="failed to get container status \"aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5\": rpc error: code = NotFound desc = could not find container \"aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5\": container with ID starting with aee17900b9adcc4e3ab455e7bbefe7539f42bf378b31ff16b028f0262aa3adf5 not found: ID does not exist" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.667558 4631 scope.go:117] "RemoveContainer" containerID="c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da" Nov 29 05:09:29 crc kubenswrapper[4631]: E1129 05:09:29.668424 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da\": container with ID starting with c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da not found: ID does not exist" containerID="c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.668446 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da"} err="failed to get container status \"c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da\": rpc error: code = NotFound desc = could not find container \"c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da\": container with ID starting with c9337a9e277b3ba873067eebd0c611c0c06f232580ad24ac7493270a63fc29da not found: ID does not exist" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.668461 4631 scope.go:117] "RemoveContainer" containerID="2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12" Nov 29 05:09:29 crc kubenswrapper[4631]: E1129 05:09:29.668743 4631 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12\": container with ID starting with 2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12 not found: ID does not exist" containerID="2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12" Nov 29 05:09:29 crc kubenswrapper[4631]: I1129 05:09:29.668783 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12"} err="failed to get container status \"2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12\": rpc error: code = NotFound desc = could not find container \"2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12\": container with ID starting with 2c0530f0e1f21265e9e555b7f517dacb8fd07cd1bfaeca584329ae28cd5ecc12 not found: ID does not exist" Nov 29 05:09:31 crc kubenswrapper[4631]: I1129 05:09:31.262896 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" path="/var/lib/kubelet/pods/dc926027-25fa-4fad-9d05-cfb2eb3d0b51/volumes" Nov 29 05:10:50 crc kubenswrapper[4631]: I1129 05:10:50.715952 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 05:10:50 crc kubenswrapper[4631]: I1129 05:10:50.716564 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 05:11:06 crc kubenswrapper[4631]: I1129 05:11:06.540034 4631 generic.go:334] "Generic (PLEG): container finished" podID="a441aed9-70ab-4efd-b036-0f0af5515f84" containerID="dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9" exitCode=0 Nov 29 05:11:06 crc kubenswrapper[4631]: I1129 05:11:06.540163 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5sd8j/must-gather-8nkth" event={"ID":"a441aed9-70ab-4efd-b036-0f0af5515f84","Type":"ContainerDied","Data":"dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9"} Nov 29 05:11:06 crc kubenswrapper[4631]: I1129 05:11:06.541083 4631 scope.go:117] "RemoveContainer" containerID="dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9" Nov 29 05:11:07 crc kubenswrapper[4631]: I1129 05:11:07.121983 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5sd8j_must-gather-8nkth_a441aed9-70ab-4efd-b036-0f0af5515f84/gather/0.log" Nov 29 05:11:14 crc kubenswrapper[4631]: I1129 05:11:14.939642 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5sd8j/must-gather-8nkth"] Nov 29 05:11:14 crc kubenswrapper[4631]: I1129 05:11:14.941292 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-5sd8j/must-gather-8nkth" podUID="a441aed9-70ab-4efd-b036-0f0af5515f84" containerName="copy" containerID="cri-o://9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee" gracePeriod=2 Nov 29 05:11:14 crc kubenswrapper[4631]: I1129 05:11:14.953097 4631 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openshift-must-gather-5sd8j/must-gather-8nkth"] Nov 29 05:11:15 crc kubenswrapper[4631]: E1129 05:11:15.273977 4631 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda441aed9_70ab_4efd_b036_0f0af5515f84.slice/crio-9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda441aed9_70ab_4efd_b036_0f0af5515f84.slice/crio-conmon-9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee.scope\": RecentStats: unable to find data in memory cache]" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.410859 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5sd8j_must-gather-8nkth_a441aed9-70ab-4efd-b036-0f0af5515f84/copy/0.log" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.411456 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5sd8j/must-gather-8nkth" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.569161 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a441aed9-70ab-4efd-b036-0f0af5515f84-must-gather-output\") pod \"a441aed9-70ab-4efd-b036-0f0af5515f84\" (UID: \"a441aed9-70ab-4efd-b036-0f0af5515f84\") " Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.569251 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlsbp\" (UniqueName: \"kubernetes.io/projected/a441aed9-70ab-4efd-b036-0f0af5515f84-kube-api-access-nlsbp\") pod \"a441aed9-70ab-4efd-b036-0f0af5515f84\" (UID: \"a441aed9-70ab-4efd-b036-0f0af5515f84\") " Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.586492 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a441aed9-70ab-4efd-b036-0f0af5515f84-kube-api-access-nlsbp" (OuterVolumeSpecName: "kube-api-access-nlsbp") pod "a441aed9-70ab-4efd-b036-0f0af5515f84" (UID: "a441aed9-70ab-4efd-b036-0f0af5515f84"). InnerVolumeSpecName "kube-api-access-nlsbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.651025 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5sd8j_must-gather-8nkth_a441aed9-70ab-4efd-b036-0f0af5515f84/copy/0.log" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.651547 4631 generic.go:334] "Generic (PLEG): container finished" podID="a441aed9-70ab-4efd-b036-0f0af5515f84" containerID="9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee" exitCode=143 Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.651603 4631 scope.go:117] "RemoveContainer" containerID="9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.651726 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5sd8j/must-gather-8nkth" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.677644 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlsbp\" (UniqueName: \"kubernetes.io/projected/a441aed9-70ab-4efd-b036-0f0af5515f84-kube-api-access-nlsbp\") on node \"crc\" DevicePath \"\"" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.687149 4631 scope.go:117] "RemoveContainer" containerID="dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.732585 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a441aed9-70ab-4efd-b036-0f0af5515f84-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "a441aed9-70ab-4efd-b036-0f0af5515f84" (UID: "a441aed9-70ab-4efd-b036-0f0af5515f84"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.743901 4631 scope.go:117] "RemoveContainer" containerID="9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee" Nov 29 05:11:15 crc kubenswrapper[4631]: E1129 05:11:15.744302 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee\": container with ID starting with 9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee not found: ID does not exist" containerID="9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.744339 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee"} err="failed to get container status \"9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee\": rpc error: code = NotFound desc = could not find container \"9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee\": container with ID starting with 9e020b504caf79dac220239f9d2f5e3cdeba1d28c3b74e4a3e5b764c3e2c1bee not found: ID does not exist" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.744358 4631 scope.go:117] "RemoveContainer" containerID="dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9" Nov 29 05:11:15 crc kubenswrapper[4631]: E1129 05:11:15.744789 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9\": container with ID starting with dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9 not found: ID does not exist" containerID="dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.744864 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9"} err="failed to get container status \"dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9\": rpc error: code = NotFound desc = could not find container \"dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9\": container with ID starting with dc7864a185e5e3ae96a3e41e526dcb4f34ba75828e1c18ba23ae0ebce85b50c9 not found: ID does not exist" Nov 29 05:11:15 crc kubenswrapper[4631]: I1129 05:11:15.779771 4631 
reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a441aed9-70ab-4efd-b036-0f0af5515f84-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 29 05:11:17 crc kubenswrapper[4631]: I1129 05:11:17.229753 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a441aed9-70ab-4efd-b036-0f0af5515f84" path="/var/lib/kubelet/pods/a441aed9-70ab-4efd-b036-0f0af5515f84/volumes" Nov 29 05:11:20 crc kubenswrapper[4631]: I1129 05:11:20.716082 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 05:11:20 crc kubenswrapper[4631]: I1129 05:11:20.718876 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 05:11:50 crc kubenswrapper[4631]: I1129 05:11:50.716584 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 05:11:50 crc kubenswrapper[4631]: I1129 05:11:50.717265 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 05:11:50 crc kubenswrapper[4631]: I1129 05:11:50.717321 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 05:11:50 crc kubenswrapper[4631]: I1129 05:11:50.718243 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b6c56a99240518977d61f2c7d5677497c6ecb3a6e851512ce81f757de601d59d"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 05:11:50 crc kubenswrapper[4631]: I1129 05:11:50.718317 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://b6c56a99240518977d61f2c7d5677497c6ecb3a6e851512ce81f757de601d59d" gracePeriod=600 Nov 29 05:11:51 crc kubenswrapper[4631]: I1129 05:11:51.713850 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="b6c56a99240518977d61f2c7d5677497c6ecb3a6e851512ce81f757de601d59d" exitCode=0 Nov 29 05:11:51 crc kubenswrapper[4631]: I1129 05:11:51.713989 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" 
event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"b6c56a99240518977d61f2c7d5677497c6ecb3a6e851512ce81f757de601d59d"} Nov 29 05:11:51 crc kubenswrapper[4631]: I1129 05:11:51.714286 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8"} Nov 29 05:11:51 crc kubenswrapper[4631]: I1129 05:11:51.714311 4631 scope.go:117] "RemoveContainer" containerID="3d6fa4d35364e23485b81ecc2ca838bc35e9c427e5a33bad818a1c0a0998f2c6" Nov 29 05:12:07 crc kubenswrapper[4631]: I1129 05:12:07.921918 4631 scope.go:117] "RemoveContainer" containerID="46bd553c09e90cbb74c5a683b895e8294e6b667a3422aacfacb3058f19b5161a" Nov 29 05:13:08 crc kubenswrapper[4631]: I1129 05:13:08.042220 4631 scope.go:117] "RemoveContainer" containerID="2971ce6b5ba7a22c8b01b61e19ec83ce3a820125f171fb4d5700e6386751ea37" Nov 29 05:13:08 crc kubenswrapper[4631]: I1129 05:13:08.077053 4631 scope.go:117] "RemoveContainer" containerID="695b71170e7f143208a88fe3b4ca504ec4984f3a8192e5c3a80d720ff1c516f2" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.344473 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qgrhs/must-gather-l7sbn"] Nov 29 05:13:42 crc kubenswrapper[4631]: E1129 05:13:42.345294 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerName="registry-server" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.345308 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerName="registry-server" Nov 29 05:13:42 crc kubenswrapper[4631]: E1129 05:13:42.345373 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a441aed9-70ab-4efd-b036-0f0af5515f84" containerName="copy" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.345380 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="a441aed9-70ab-4efd-b036-0f0af5515f84" containerName="copy" Nov 29 05:13:42 crc kubenswrapper[4631]: E1129 05:13:42.345394 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerName="extract-utilities" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.345400 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerName="extract-utilities" Nov 29 05:13:42 crc kubenswrapper[4631]: E1129 05:13:42.345414 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerName="extract-content" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.345419 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerName="extract-content" Nov 29 05:13:42 crc kubenswrapper[4631]: E1129 05:13:42.345428 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a441aed9-70ab-4efd-b036-0f0af5515f84" containerName="gather" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.345434 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="a441aed9-70ab-4efd-b036-0f0af5515f84" containerName="gather" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.345603 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="a441aed9-70ab-4efd-b036-0f0af5515f84" containerName="gather" Nov 29 05:13:42 crc kubenswrapper[4631]: 
I1129 05:13:42.345630 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="a441aed9-70ab-4efd-b036-0f0af5515f84" containerName="copy" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.345642 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc926027-25fa-4fad-9d05-cfb2eb3d0b51" containerName="registry-server" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.346586 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.351880 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-qgrhs"/"openshift-service-ca.crt" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.351969 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-qgrhs"/"kube-root-ca.crt" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.353157 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-qgrhs/must-gather-l7sbn"] Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.361549 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b21fae68-736e-42d5-86e8-c7288e5dc464-must-gather-output\") pod \"must-gather-l7sbn\" (UID: \"b21fae68-736e-42d5-86e8-c7288e5dc464\") " pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.361604 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2qrm\" (UniqueName: \"kubernetes.io/projected/b21fae68-736e-42d5-86e8-c7288e5dc464-kube-api-access-h2qrm\") pod \"must-gather-l7sbn\" (UID: \"b21fae68-736e-42d5-86e8-c7288e5dc464\") " pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.463453 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b21fae68-736e-42d5-86e8-c7288e5dc464-must-gather-output\") pod \"must-gather-l7sbn\" (UID: \"b21fae68-736e-42d5-86e8-c7288e5dc464\") " pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.463521 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2qrm\" (UniqueName: \"kubernetes.io/projected/b21fae68-736e-42d5-86e8-c7288e5dc464-kube-api-access-h2qrm\") pod \"must-gather-l7sbn\" (UID: \"b21fae68-736e-42d5-86e8-c7288e5dc464\") " pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.463886 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b21fae68-736e-42d5-86e8-c7288e5dc464-must-gather-output\") pod \"must-gather-l7sbn\" (UID: \"b21fae68-736e-42d5-86e8-c7288e5dc464\") " pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.496276 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2qrm\" (UniqueName: \"kubernetes.io/projected/b21fae68-736e-42d5-86e8-c7288e5dc464-kube-api-access-h2qrm\") pod \"must-gather-l7sbn\" (UID: \"b21fae68-736e-42d5-86e8-c7288e5dc464\") " pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:13:42 crc kubenswrapper[4631]: I1129 05:13:42.665135 4631 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:13:43 crc kubenswrapper[4631]: I1129 05:13:43.116064 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-qgrhs/must-gather-l7sbn"] Nov 29 05:13:44 crc kubenswrapper[4631]: I1129 05:13:44.060589 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" event={"ID":"b21fae68-736e-42d5-86e8-c7288e5dc464","Type":"ContainerStarted","Data":"8cf94ce82c7b6ab79c2070f54be0199a7c750d2070a22df9ad4ad3a8bc46a64b"} Nov 29 05:13:44 crc kubenswrapper[4631]: I1129 05:13:44.062403 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" event={"ID":"b21fae68-736e-42d5-86e8-c7288e5dc464","Type":"ContainerStarted","Data":"924c2209320d75b5494126faa60f48435b46207623bbeefaf505ec8b8f1fc855"} Nov 29 05:13:44 crc kubenswrapper[4631]: I1129 05:13:44.062426 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" event={"ID":"b21fae68-736e-42d5-86e8-c7288e5dc464","Type":"ContainerStarted","Data":"51c379fb631f7e360c54c45aee7921f1a416e021bcf6762daf1112611a6e88af"} Nov 29 05:13:44 crc kubenswrapper[4631]: I1129 05:13:44.090116 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" podStartSLOduration=2.090089963 podStartE2EDuration="2.090089963s" podCreationTimestamp="2025-11-29 05:13:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 05:13:44.087085989 +0000 UTC m=+3751.151589533" watchObservedRunningTime="2025-11-29 05:13:44.090089963 +0000 UTC m=+3751.154593507" Nov 29 05:13:47 crc kubenswrapper[4631]: I1129 05:13:47.083121 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qgrhs/crc-debug-rsgzp"] Nov 29 05:13:47 crc kubenswrapper[4631]: I1129 05:13:47.085903 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:13:47 crc kubenswrapper[4631]: I1129 05:13:47.089807 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-qgrhs"/"default-dockercfg-hhdb6" Nov 29 05:13:47 crc kubenswrapper[4631]: I1129 05:13:47.162924 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkbmd\" (UniqueName: \"kubernetes.io/projected/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-kube-api-access-pkbmd\") pod \"crc-debug-rsgzp\" (UID: \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\") " pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:13:47 crc kubenswrapper[4631]: I1129 05:13:47.163231 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-host\") pod \"crc-debug-rsgzp\" (UID: \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\") " pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:13:47 crc kubenswrapper[4631]: I1129 05:13:47.449619 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-host\") pod \"crc-debug-rsgzp\" (UID: \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\") " pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:13:47 crc kubenswrapper[4631]: I1129 05:13:47.449933 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkbmd\" (UniqueName: \"kubernetes.io/projected/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-kube-api-access-pkbmd\") pod \"crc-debug-rsgzp\" (UID: \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\") " pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:13:47 crc kubenswrapper[4631]: I1129 05:13:47.742042 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-host\") pod \"crc-debug-rsgzp\" (UID: \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\") " pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:13:47 crc kubenswrapper[4631]: I1129 05:13:47.842708 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkbmd\" (UniqueName: \"kubernetes.io/projected/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-kube-api-access-pkbmd\") pod \"crc-debug-rsgzp\" (UID: \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\") " pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:13:48 crc kubenswrapper[4631]: I1129 05:13:48.051603 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:13:48 crc kubenswrapper[4631]: W1129 05:13:48.084466 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee9b97dd_6b67_4c55_b73e_bb320adf8cf4.slice/crio-9ada8a5f8e61e88c1250c6e7a99d46d5e82bd3525ffcc8d25603a91fb2745d71 WatchSource:0}: Error finding container 9ada8a5f8e61e88c1250c6e7a99d46d5e82bd3525ffcc8d25603a91fb2745d71: Status 404 returned error can't find the container with id 9ada8a5f8e61e88c1250c6e7a99d46d5e82bd3525ffcc8d25603a91fb2745d71 Nov 29 05:13:48 crc kubenswrapper[4631]: I1129 05:13:48.103442 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" event={"ID":"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4","Type":"ContainerStarted","Data":"9ada8a5f8e61e88c1250c6e7a99d46d5e82bd3525ffcc8d25603a91fb2745d71"} Nov 29 05:13:49 crc kubenswrapper[4631]: I1129 05:13:49.112418 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" event={"ID":"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4","Type":"ContainerStarted","Data":"fea6c20cdd7fbba09b5f9822955f9cb9a5f7e46575a458f4218c377b883c18db"} Nov 29 05:13:49 crc kubenswrapper[4631]: I1129 05:13:49.130276 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" podStartSLOduration=2.130259909 podStartE2EDuration="2.130259909s" podCreationTimestamp="2025-11-29 05:13:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-29 05:13:49.123341319 +0000 UTC m=+3756.187844833" watchObservedRunningTime="2025-11-29 05:13:49.130259909 +0000 UTC m=+3756.194763423" Nov 29 05:14:20 crc kubenswrapper[4631]: I1129 05:14:20.715833 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 05:14:20 crc kubenswrapper[4631]: I1129 05:14:20.716410 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 05:14:22 crc kubenswrapper[4631]: I1129 05:14:22.388453 4631 generic.go:334] "Generic (PLEG): container finished" podID="ee9b97dd-6b67-4c55-b73e-bb320adf8cf4" containerID="fea6c20cdd7fbba09b5f9822955f9cb9a5f7e46575a458f4218c377b883c18db" exitCode=0 Nov 29 05:14:22 crc kubenswrapper[4631]: I1129 05:14:22.388768 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" event={"ID":"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4","Type":"ContainerDied","Data":"fea6c20cdd7fbba09b5f9822955f9cb9a5f7e46575a458f4218c377b883c18db"} Nov 29 05:14:22 crc kubenswrapper[4631]: I1129 05:14:22.909979 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-t75xr"] Nov 29 05:14:22 crc kubenswrapper[4631]: I1129 05:14:22.911784 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:22 crc kubenswrapper[4631]: I1129 05:14:22.935560 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t75xr"] Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.007602 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-utilities\") pod \"certified-operators-t75xr\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.007646 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-catalog-content\") pod \"certified-operators-t75xr\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.007703 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjlzh\" (UniqueName: \"kubernetes.io/projected/8d075d1a-fc52-433c-80ec-661a65411d87-kube-api-access-pjlzh\") pod \"certified-operators-t75xr\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.109109 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjlzh\" (UniqueName: \"kubernetes.io/projected/8d075d1a-fc52-433c-80ec-661a65411d87-kube-api-access-pjlzh\") pod \"certified-operators-t75xr\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.109277 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-utilities\") pod \"certified-operators-t75xr\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.109296 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-catalog-content\") pod \"certified-operators-t75xr\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.109763 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-catalog-content\") pod \"certified-operators-t75xr\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.109826 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-utilities\") pod \"certified-operators-t75xr\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.127077 4631 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pjlzh\" (UniqueName: \"kubernetes.io/projected/8d075d1a-fc52-433c-80ec-661a65411d87-kube-api-access-pjlzh\") pod \"certified-operators-t75xr\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.232942 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.495886 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.575724 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qgrhs/crc-debug-rsgzp"] Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.585718 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qgrhs/crc-debug-rsgzp"] Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.619513 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-host\") pod \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\" (UID: \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\") " Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.619663 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkbmd\" (UniqueName: \"kubernetes.io/projected/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-kube-api-access-pkbmd\") pod \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\" (UID: \"ee9b97dd-6b67-4c55-b73e-bb320adf8cf4\") " Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.623412 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-host" (OuterVolumeSpecName: "host") pod "ee9b97dd-6b67-4c55-b73e-bb320adf8cf4" (UID: "ee9b97dd-6b67-4c55-b73e-bb320adf8cf4"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.643767 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-kube-api-access-pkbmd" (OuterVolumeSpecName: "kube-api-access-pkbmd") pod "ee9b97dd-6b67-4c55-b73e-bb320adf8cf4" (UID: "ee9b97dd-6b67-4c55-b73e-bb320adf8cf4"). InnerVolumeSpecName "kube-api-access-pkbmd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.722118 4631 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-host\") on node \"crc\" DevicePath \"\"" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.722150 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkbmd\" (UniqueName: \"kubernetes.io/projected/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4-kube-api-access-pkbmd\") on node \"crc\" DevicePath \"\"" Nov 29 05:14:23 crc kubenswrapper[4631]: I1129 05:14:23.846867 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t75xr"] Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.411982 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ada8a5f8e61e88c1250c6e7a99d46d5e82bd3525ffcc8d25603a91fb2745d71" Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.412268 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-rsgzp" Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.419666 4631 generic.go:334] "Generic (PLEG): container finished" podID="8d075d1a-fc52-433c-80ec-661a65411d87" containerID="62e96f136313ab30114bb400600c1aabfef60bdd99f90069da617211108fee06" exitCode=0 Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.419720 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t75xr" event={"ID":"8d075d1a-fc52-433c-80ec-661a65411d87","Type":"ContainerDied","Data":"62e96f136313ab30114bb400600c1aabfef60bdd99f90069da617211108fee06"} Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.419745 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t75xr" event={"ID":"8d075d1a-fc52-433c-80ec-661a65411d87","Type":"ContainerStarted","Data":"b30cad2513951d887fa647d96902d64ce9656a5d6afb53c329ca0f72ca3f1866"} Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.422923 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.894583 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qgrhs/crc-debug-xt2t8"] Nov 29 05:14:24 crc kubenswrapper[4631]: E1129 05:14:24.895133 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee9b97dd-6b67-4c55-b73e-bb320adf8cf4" containerName="container-00" Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.895146 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee9b97dd-6b67-4c55-b73e-bb320adf8cf4" containerName="container-00" Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.895352 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee9b97dd-6b67-4c55-b73e-bb320adf8cf4" containerName="container-00" Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.895907 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.897891 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-qgrhs"/"default-dockercfg-hhdb6" Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.958267 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqmlm\" (UniqueName: \"kubernetes.io/projected/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-kube-api-access-mqmlm\") pod \"crc-debug-xt2t8\" (UID: \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\") " pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:24 crc kubenswrapper[4631]: I1129 05:14:24.958379 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-host\") pod \"crc-debug-xt2t8\" (UID: \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\") " pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:25 crc kubenswrapper[4631]: I1129 05:14:25.059876 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqmlm\" (UniqueName: \"kubernetes.io/projected/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-kube-api-access-mqmlm\") pod \"crc-debug-xt2t8\" (UID: \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\") " pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:25 crc kubenswrapper[4631]: I1129 05:14:25.059950 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-host\") pod \"crc-debug-xt2t8\" (UID: \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\") " pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:25 crc kubenswrapper[4631]: I1129 05:14:25.060111 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-host\") pod \"crc-debug-xt2t8\" (UID: \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\") " pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:25 crc kubenswrapper[4631]: I1129 05:14:25.217587 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqmlm\" (UniqueName: \"kubernetes.io/projected/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-kube-api-access-mqmlm\") pod \"crc-debug-xt2t8\" (UID: \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\") " pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:25 crc kubenswrapper[4631]: I1129 05:14:25.226654 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee9b97dd-6b67-4c55-b73e-bb320adf8cf4" path="/var/lib/kubelet/pods/ee9b97dd-6b67-4c55-b73e-bb320adf8cf4/volumes" Nov 29 05:14:25 crc kubenswrapper[4631]: I1129 05:14:25.509198 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:26 crc kubenswrapper[4631]: I1129 05:14:26.436821 4631 generic.go:334] "Generic (PLEG): container finished" podID="78eb4293-ca15-45d4-92fc-8ec0ec9890b9" containerID="7635f10d8c1144b0c94f20dd5f7aa8f4478fcc4ed6c02587629081749556cea0" exitCode=0 Nov 29 05:14:26 crc kubenswrapper[4631]: I1129 05:14:26.436902 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" event={"ID":"78eb4293-ca15-45d4-92fc-8ec0ec9890b9","Type":"ContainerDied","Data":"7635f10d8c1144b0c94f20dd5f7aa8f4478fcc4ed6c02587629081749556cea0"} Nov 29 05:14:26 crc kubenswrapper[4631]: I1129 05:14:26.437271 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" event={"ID":"78eb4293-ca15-45d4-92fc-8ec0ec9890b9","Type":"ContainerStarted","Data":"d5df68a979052488bc19b6b8862d157c38fb2fdf65c457ad2025b291aced9431"} Nov 29 05:14:26 crc kubenswrapper[4631]: I1129 05:14:26.438728 4631 generic.go:334] "Generic (PLEG): container finished" podID="8d075d1a-fc52-433c-80ec-661a65411d87" containerID="ae26d3fad9253306546bfcb36cb4274084ae8e49946f8c0bc5663f66fe5edc64" exitCode=0 Nov 29 05:14:26 crc kubenswrapper[4631]: I1129 05:14:26.438764 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t75xr" event={"ID":"8d075d1a-fc52-433c-80ec-661a65411d87","Type":"ContainerDied","Data":"ae26d3fad9253306546bfcb36cb4274084ae8e49946f8c0bc5663f66fe5edc64"} Nov 29 05:14:26 crc kubenswrapper[4631]: I1129 05:14:26.831526 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qgrhs/crc-debug-xt2t8"] Nov 29 05:14:26 crc kubenswrapper[4631]: I1129 05:14:26.839639 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qgrhs/crc-debug-xt2t8"] Nov 29 05:14:27 crc kubenswrapper[4631]: I1129 05:14:27.449449 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t75xr" event={"ID":"8d075d1a-fc52-433c-80ec-661a65411d87","Type":"ContainerStarted","Data":"6045b9b719271e3528caad57300663ec3c856ec5836fab8dbbaaf886104fe220"} Nov 29 05:14:27 crc kubenswrapper[4631]: I1129 05:14:27.470928 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-t75xr" podStartSLOduration=2.918395286 podStartE2EDuration="5.470912574s" podCreationTimestamp="2025-11-29 05:14:22 +0000 UTC" firstStartedPulling="2025-11-29 05:14:24.422713935 +0000 UTC m=+3791.487217449" lastFinishedPulling="2025-11-29 05:14:26.975231223 +0000 UTC m=+3794.039734737" observedRunningTime="2025-11-29 05:14:27.466353732 +0000 UTC m=+3794.530857246" watchObservedRunningTime="2025-11-29 05:14:27.470912574 +0000 UTC m=+3794.535416088" Nov 29 05:14:27 crc kubenswrapper[4631]: I1129 05:14:27.555345 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:27 crc kubenswrapper[4631]: I1129 05:14:27.732658 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-host\") pod \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\" (UID: \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\") " Nov 29 05:14:27 crc kubenswrapper[4631]: I1129 05:14:27.732728 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqmlm\" (UniqueName: \"kubernetes.io/projected/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-kube-api-access-mqmlm\") pod \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\" (UID: \"78eb4293-ca15-45d4-92fc-8ec0ec9890b9\") " Nov 29 05:14:27 crc kubenswrapper[4631]: I1129 05:14:27.733060 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-host" (OuterVolumeSpecName: "host") pod "78eb4293-ca15-45d4-92fc-8ec0ec9890b9" (UID: "78eb4293-ca15-45d4-92fc-8ec0ec9890b9"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 05:14:27 crc kubenswrapper[4631]: I1129 05:14:27.733630 4631 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-host\") on node \"crc\" DevicePath \"\"" Nov 29 05:14:27 crc kubenswrapper[4631]: I1129 05:14:27.738143 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-kube-api-access-mqmlm" (OuterVolumeSpecName: "kube-api-access-mqmlm") pod "78eb4293-ca15-45d4-92fc-8ec0ec9890b9" (UID: "78eb4293-ca15-45d4-92fc-8ec0ec9890b9"). InnerVolumeSpecName "kube-api-access-mqmlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:14:27 crc kubenswrapper[4631]: I1129 05:14:27.835486 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqmlm\" (UniqueName: \"kubernetes.io/projected/78eb4293-ca15-45d4-92fc-8ec0ec9890b9-kube-api-access-mqmlm\") on node \"crc\" DevicePath \"\"" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.123882 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qgrhs/crc-debug-vnc9f"] Nov 29 05:14:28 crc kubenswrapper[4631]: E1129 05:14:28.124212 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78eb4293-ca15-45d4-92fc-8ec0ec9890b9" containerName="container-00" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.124227 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="78eb4293-ca15-45d4-92fc-8ec0ec9890b9" containerName="container-00" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.124437 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="78eb4293-ca15-45d4-92fc-8ec0ec9890b9" containerName="container-00" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.124999 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.241319 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kgz8\" (UniqueName: \"kubernetes.io/projected/b65949eb-86cb-4b45-80aa-307a5b9dec99-kube-api-access-8kgz8\") pod \"crc-debug-vnc9f\" (UID: \"b65949eb-86cb-4b45-80aa-307a5b9dec99\") " pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.241646 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b65949eb-86cb-4b45-80aa-307a5b9dec99-host\") pod \"crc-debug-vnc9f\" (UID: \"b65949eb-86cb-4b45-80aa-307a5b9dec99\") " pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.343526 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kgz8\" (UniqueName: \"kubernetes.io/projected/b65949eb-86cb-4b45-80aa-307a5b9dec99-kube-api-access-8kgz8\") pod \"crc-debug-vnc9f\" (UID: \"b65949eb-86cb-4b45-80aa-307a5b9dec99\") " pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.343868 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b65949eb-86cb-4b45-80aa-307a5b9dec99-host\") pod \"crc-debug-vnc9f\" (UID: \"b65949eb-86cb-4b45-80aa-307a5b9dec99\") " pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.343985 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b65949eb-86cb-4b45-80aa-307a5b9dec99-host\") pod \"crc-debug-vnc9f\" (UID: \"b65949eb-86cb-4b45-80aa-307a5b9dec99\") " pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.360830 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kgz8\" (UniqueName: \"kubernetes.io/projected/b65949eb-86cb-4b45-80aa-307a5b9dec99-kube-api-access-8kgz8\") pod \"crc-debug-vnc9f\" (UID: \"b65949eb-86cb-4b45-80aa-307a5b9dec99\") " pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.439198 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.471069 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-xt2t8" Nov 29 05:14:28 crc kubenswrapper[4631]: I1129 05:14:28.471114 4631 scope.go:117] "RemoveContainer" containerID="7635f10d8c1144b0c94f20dd5f7aa8f4478fcc4ed6c02587629081749556cea0" Nov 29 05:14:28 crc kubenswrapper[4631]: W1129 05:14:28.485476 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb65949eb_86cb_4b45_80aa_307a5b9dec99.slice/crio-0d14931350f70f47943904ca025c5a82c6635ba20e262e92874aef7548cf374b WatchSource:0}: Error finding container 0d14931350f70f47943904ca025c5a82c6635ba20e262e92874aef7548cf374b: Status 404 returned error can't find the container with id 0d14931350f70f47943904ca025c5a82c6635ba20e262e92874aef7548cf374b Nov 29 05:14:29 crc kubenswrapper[4631]: I1129 05:14:29.228965 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78eb4293-ca15-45d4-92fc-8ec0ec9890b9" path="/var/lib/kubelet/pods/78eb4293-ca15-45d4-92fc-8ec0ec9890b9/volumes" Nov 29 05:14:29 crc kubenswrapper[4631]: I1129 05:14:29.483189 4631 generic.go:334] "Generic (PLEG): container finished" podID="b65949eb-86cb-4b45-80aa-307a5b9dec99" containerID="96860bd57d0dad3ae870fbcad1be03def26fbb094099044471a3eaff962268b8" exitCode=0 Nov 29 05:14:29 crc kubenswrapper[4631]: I1129 05:14:29.483234 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" event={"ID":"b65949eb-86cb-4b45-80aa-307a5b9dec99","Type":"ContainerDied","Data":"96860bd57d0dad3ae870fbcad1be03def26fbb094099044471a3eaff962268b8"} Nov 29 05:14:29 crc kubenswrapper[4631]: I1129 05:14:29.483260 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" event={"ID":"b65949eb-86cb-4b45-80aa-307a5b9dec99","Type":"ContainerStarted","Data":"0d14931350f70f47943904ca025c5a82c6635ba20e262e92874aef7548cf374b"} Nov 29 05:14:29 crc kubenswrapper[4631]: I1129 05:14:29.526799 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qgrhs/crc-debug-vnc9f"] Nov 29 05:14:29 crc kubenswrapper[4631]: I1129 05:14:29.537207 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qgrhs/crc-debug-vnc9f"] Nov 29 05:14:30 crc kubenswrapper[4631]: I1129 05:14:30.578234 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:30 crc kubenswrapper[4631]: I1129 05:14:30.683476 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kgz8\" (UniqueName: \"kubernetes.io/projected/b65949eb-86cb-4b45-80aa-307a5b9dec99-kube-api-access-8kgz8\") pod \"b65949eb-86cb-4b45-80aa-307a5b9dec99\" (UID: \"b65949eb-86cb-4b45-80aa-307a5b9dec99\") " Nov 29 05:14:30 crc kubenswrapper[4631]: I1129 05:14:30.683575 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b65949eb-86cb-4b45-80aa-307a5b9dec99-host\") pod \"b65949eb-86cb-4b45-80aa-307a5b9dec99\" (UID: \"b65949eb-86cb-4b45-80aa-307a5b9dec99\") " Nov 29 05:14:30 crc kubenswrapper[4631]: I1129 05:14:30.684040 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b65949eb-86cb-4b45-80aa-307a5b9dec99-host" (OuterVolumeSpecName: "host") pod "b65949eb-86cb-4b45-80aa-307a5b9dec99" (UID: "b65949eb-86cb-4b45-80aa-307a5b9dec99"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 29 05:14:30 crc kubenswrapper[4631]: I1129 05:14:30.688681 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b65949eb-86cb-4b45-80aa-307a5b9dec99-kube-api-access-8kgz8" (OuterVolumeSpecName: "kube-api-access-8kgz8") pod "b65949eb-86cb-4b45-80aa-307a5b9dec99" (UID: "b65949eb-86cb-4b45-80aa-307a5b9dec99"). InnerVolumeSpecName "kube-api-access-8kgz8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:14:30 crc kubenswrapper[4631]: I1129 05:14:30.785424 4631 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b65949eb-86cb-4b45-80aa-307a5b9dec99-host\") on node \"crc\" DevicePath \"\"" Nov 29 05:14:30 crc kubenswrapper[4631]: I1129 05:14:30.785454 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kgz8\" (UniqueName: \"kubernetes.io/projected/b65949eb-86cb-4b45-80aa-307a5b9dec99-kube-api-access-8kgz8\") on node \"crc\" DevicePath \"\"" Nov 29 05:14:31 crc kubenswrapper[4631]: I1129 05:14:31.275656 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b65949eb-86cb-4b45-80aa-307a5b9dec99" path="/var/lib/kubelet/pods/b65949eb-86cb-4b45-80aa-307a5b9dec99/volumes" Nov 29 05:14:31 crc kubenswrapper[4631]: I1129 05:14:31.504042 4631 scope.go:117] "RemoveContainer" containerID="96860bd57d0dad3ae870fbcad1be03def26fbb094099044471a3eaff962268b8" Nov 29 05:14:31 crc kubenswrapper[4631]: I1129 05:14:31.504103 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qgrhs/crc-debug-vnc9f" Nov 29 05:14:33 crc kubenswrapper[4631]: I1129 05:14:33.235585 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:33 crc kubenswrapper[4631]: I1129 05:14:33.236072 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:33 crc kubenswrapper[4631]: I1129 05:14:33.311861 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:33 crc kubenswrapper[4631]: I1129 05:14:33.612003 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:33 crc kubenswrapper[4631]: I1129 05:14:33.711163 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t75xr"] Nov 29 05:14:35 crc kubenswrapper[4631]: I1129 05:14:35.575278 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-t75xr" podUID="8d075d1a-fc52-433c-80ec-661a65411d87" containerName="registry-server" containerID="cri-o://6045b9b719271e3528caad57300663ec3c856ec5836fab8dbbaaf886104fe220" gracePeriod=2 Nov 29 05:14:36 crc kubenswrapper[4631]: I1129 05:14:36.592614 4631 generic.go:334] "Generic (PLEG): container finished" podID="8d075d1a-fc52-433c-80ec-661a65411d87" containerID="6045b9b719271e3528caad57300663ec3c856ec5836fab8dbbaaf886104fe220" exitCode=0 Nov 29 05:14:36 crc kubenswrapper[4631]: I1129 05:14:36.592824 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t75xr" event={"ID":"8d075d1a-fc52-433c-80ec-661a65411d87","Type":"ContainerDied","Data":"6045b9b719271e3528caad57300663ec3c856ec5836fab8dbbaaf886104fe220"} 
Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:36.940991 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.115507 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-utilities\") pod \"8d075d1a-fc52-433c-80ec-661a65411d87\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.116218 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjlzh\" (UniqueName: \"kubernetes.io/projected/8d075d1a-fc52-433c-80ec-661a65411d87-kube-api-access-pjlzh\") pod \"8d075d1a-fc52-433c-80ec-661a65411d87\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.116276 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-catalog-content\") pod \"8d075d1a-fc52-433c-80ec-661a65411d87\" (UID: \"8d075d1a-fc52-433c-80ec-661a65411d87\") " Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.117099 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-utilities" (OuterVolumeSpecName: "utilities") pod "8d075d1a-fc52-433c-80ec-661a65411d87" (UID: "8d075d1a-fc52-433c-80ec-661a65411d87"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.123757 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d075d1a-fc52-433c-80ec-661a65411d87-kube-api-access-pjlzh" (OuterVolumeSpecName: "kube-api-access-pjlzh") pod "8d075d1a-fc52-433c-80ec-661a65411d87" (UID: "8d075d1a-fc52-433c-80ec-661a65411d87"). InnerVolumeSpecName "kube-api-access-pjlzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.188114 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d075d1a-fc52-433c-80ec-661a65411d87" (UID: "8d075d1a-fc52-433c-80ec-661a65411d87"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.217893 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjlzh\" (UniqueName: \"kubernetes.io/projected/8d075d1a-fc52-433c-80ec-661a65411d87-kube-api-access-pjlzh\") on node \"crc\" DevicePath \"\"" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.217918 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.217928 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d075d1a-fc52-433c-80ec-661a65411d87-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.602390 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t75xr" event={"ID":"8d075d1a-fc52-433c-80ec-661a65411d87","Type":"ContainerDied","Data":"b30cad2513951d887fa647d96902d64ce9656a5d6afb53c329ca0f72ca3f1866"} Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.603290 4631 scope.go:117] "RemoveContainer" containerID="6045b9b719271e3528caad57300663ec3c856ec5836fab8dbbaaf886104fe220" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.602455 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t75xr" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.630868 4631 scope.go:117] "RemoveContainer" containerID="ae26d3fad9253306546bfcb36cb4274084ae8e49946f8c0bc5663f66fe5edc64" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.645097 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t75xr"] Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.648741 4631 scope.go:117] "RemoveContainer" containerID="62e96f136313ab30114bb400600c1aabfef60bdd99f90069da617211108fee06" Nov 29 05:14:37 crc kubenswrapper[4631]: I1129 05:14:37.660357 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-t75xr"] Nov 29 05:14:39 crc kubenswrapper[4631]: I1129 05:14:39.227910 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d075d1a-fc52-433c-80ec-661a65411d87" path="/var/lib/kubelet/pods/8d075d1a-fc52-433c-80ec-661a65411d87/volumes" Nov 29 05:14:50 crc kubenswrapper[4631]: I1129 05:14:50.715702 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 05:14:50 crc kubenswrapper[4631]: I1129 05:14:50.716146 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.193516 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5"] Nov 29 05:15:00 crc kubenswrapper[4631]: E1129 05:15:00.194444 4631 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="b65949eb-86cb-4b45-80aa-307a5b9dec99" containerName="container-00" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.194460 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b65949eb-86cb-4b45-80aa-307a5b9dec99" containerName="container-00" Nov 29 05:15:00 crc kubenswrapper[4631]: E1129 05:15:00.194493 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d075d1a-fc52-433c-80ec-661a65411d87" containerName="registry-server" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.194502 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d075d1a-fc52-433c-80ec-661a65411d87" containerName="registry-server" Nov 29 05:15:00 crc kubenswrapper[4631]: E1129 05:15:00.194518 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d075d1a-fc52-433c-80ec-661a65411d87" containerName="extract-content" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.194526 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d075d1a-fc52-433c-80ec-661a65411d87" containerName="extract-content" Nov 29 05:15:00 crc kubenswrapper[4631]: E1129 05:15:00.194548 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d075d1a-fc52-433c-80ec-661a65411d87" containerName="extract-utilities" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.194569 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d075d1a-fc52-433c-80ec-661a65411d87" containerName="extract-utilities" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.194813 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d075d1a-fc52-433c-80ec-661a65411d87" containerName="registry-server" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.194831 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b65949eb-86cb-4b45-80aa-307a5b9dec99" containerName="container-00" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.195568 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.197356 4631 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.197801 4631 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.210111 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5"] Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.306475 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wpc7\" (UniqueName: \"kubernetes.io/projected/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-kube-api-access-4wpc7\") pod \"collect-profiles-29406555-lmxj5\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.306649 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-config-volume\") pod \"collect-profiles-29406555-lmxj5\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.306916 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-secret-volume\") pod \"collect-profiles-29406555-lmxj5\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.407691 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-secret-volume\") pod \"collect-profiles-29406555-lmxj5\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.408049 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wpc7\" (UniqueName: \"kubernetes.io/projected/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-kube-api-access-4wpc7\") pod \"collect-profiles-29406555-lmxj5\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.408104 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-config-volume\") pod \"collect-profiles-29406555-lmxj5\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.409065 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-config-volume\") pod 
\"collect-profiles-29406555-lmxj5\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.418178 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-secret-volume\") pod \"collect-profiles-29406555-lmxj5\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.443379 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wpc7\" (UniqueName: \"kubernetes.io/projected/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-kube-api-access-4wpc7\") pod \"collect-profiles-29406555-lmxj5\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.523262 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:00 crc kubenswrapper[4631]: I1129 05:15:00.970545 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5"] Nov 29 05:15:00 crc kubenswrapper[4631]: W1129 05:15:00.992779 4631 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e270d47_fd91_4cfd_a49b_54ab08fa0f03.slice/crio-2a68a50455bd55c3f98cdf2e4bd2c221166c1cdb2ed6b2dbf0577d2fa4e2bc0f WatchSource:0}: Error finding container 2a68a50455bd55c3f98cdf2e4bd2c221166c1cdb2ed6b2dbf0577d2fa4e2bc0f: Status 404 returned error can't find the container with id 2a68a50455bd55c3f98cdf2e4bd2c221166c1cdb2ed6b2dbf0577d2fa4e2bc0f Nov 29 05:15:01 crc kubenswrapper[4631]: I1129 05:15:01.826143 4631 generic.go:334] "Generic (PLEG): container finished" podID="2e270d47-fd91-4cfd-a49b-54ab08fa0f03" containerID="ac73d0739c6b0a27908ecfb4a1671f89720153207e7c0ca6be283a9398023f68" exitCode=0 Nov 29 05:15:01 crc kubenswrapper[4631]: I1129 05:15:01.826226 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" event={"ID":"2e270d47-fd91-4cfd-a49b-54ab08fa0f03","Type":"ContainerDied","Data":"ac73d0739c6b0a27908ecfb4a1671f89720153207e7c0ca6be283a9398023f68"} Nov 29 05:15:01 crc kubenswrapper[4631]: I1129 05:15:01.826658 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" event={"ID":"2e270d47-fd91-4cfd-a49b-54ab08fa0f03","Type":"ContainerStarted","Data":"2a68a50455bd55c3f98cdf2e4bd2c221166c1cdb2ed6b2dbf0577d2fa4e2bc0f"} Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.249521 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.259082 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-fb967fbcd-pqplm_f7aced22-8f95-4c19-b6c6-f56a84ae29e0/barbican-api/0.log" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.266632 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-config-volume\") pod \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.266818 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-secret-volume\") pod \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.267066 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wpc7\" (UniqueName: \"kubernetes.io/projected/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-kube-api-access-4wpc7\") pod \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\" (UID: \"2e270d47-fd91-4cfd-a49b-54ab08fa0f03\") " Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.267086 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-config-volume" (OuterVolumeSpecName: "config-volume") pod "2e270d47-fd91-4cfd-a49b-54ab08fa0f03" (UID: "2e270d47-fd91-4cfd-a49b-54ab08fa0f03"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.267571 4631 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-config-volume\") on node \"crc\" DevicePath \"\"" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.273146 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-kube-api-access-4wpc7" (OuterVolumeSpecName: "kube-api-access-4wpc7") pod "2e270d47-fd91-4cfd-a49b-54ab08fa0f03" (UID: "2e270d47-fd91-4cfd-a49b-54ab08fa0f03"). InnerVolumeSpecName "kube-api-access-4wpc7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.292528 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2e270d47-fd91-4cfd-a49b-54ab08fa0f03" (UID: "2e270d47-fd91-4cfd-a49b-54ab08fa0f03"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.369143 4631 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.369436 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wpc7\" (UniqueName: \"kubernetes.io/projected/2e270d47-fd91-4cfd-a49b-54ab08fa0f03-kube-api-access-4wpc7\") on node \"crc\" DevicePath \"\"" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.382623 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-fb967fbcd-pqplm_f7aced22-8f95-4c19-b6c6-f56a84ae29e0/barbican-api-log/0.log" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.457076 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5555f74b94-58bwl_f3ff1d2b-0fc8-49dc-a02f-948b81d54988/barbican-keystone-listener/0.log" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.652108 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5555f74b94-58bwl_f3ff1d2b-0fc8-49dc-a02f-948b81d54988/barbican-keystone-listener-log/0.log" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.696261 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-777b8bd98c-blh5p_e5880470-8751-4613-82c7-33efabd35a6e/barbican-worker/0.log" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.828897 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-777b8bd98c-blh5p_e5880470-8751-4613-82c7-33efabd35a6e/barbican-worker-log/0.log" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.841971 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" event={"ID":"2e270d47-fd91-4cfd-a49b-54ab08fa0f03","Type":"ContainerDied","Data":"2a68a50455bd55c3f98cdf2e4bd2c221166c1cdb2ed6b2dbf0577d2fa4e2bc0f"} Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.842004 4631 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a68a50455bd55c3f98cdf2e4bd2c221166c1cdb2ed6b2dbf0577d2fa4e2bc0f" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.842053 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29406555-lmxj5" Nov 29 05:15:03 crc kubenswrapper[4631]: I1129 05:15:03.891850 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-xb6xj_c7c0f56e-3925-47a4-9516-9c9d662540db/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.051493 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ad1ef30-44b5-455b-86a8-136862164eba/ceilometer-central-agent/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.086959 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ad1ef30-44b5-455b-86a8-136862164eba/ceilometer-notification-agent/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.118759 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ad1ef30-44b5-455b-86a8-136862164eba/proxy-httpd/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.233676 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ad1ef30-44b5-455b-86a8-136862164eba/sg-core/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.339002 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"] Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.349525 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_4bb283dc-d495-4398-a0d1-da97df47ffbd/cinder-api-log/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.349634 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29406510-2b84k"] Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.406996 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_4bb283dc-d495-4398-a0d1-da97df47ffbd/cinder-api/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.552022 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_33fff273-753a-43d7-ad1d-3d8dd9d3f373/cinder-scheduler/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.648658 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_33fff273-753a-43d7-ad1d-3d8dd9d3f373/probe/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.712834 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-26cwd_6cc48245-72ab-4e25-91b3-c98fe56e9869/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.915583 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-ngsg5_12e84ebd-9825-4ef2-9356-626fdc73dbb8/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:04 crc kubenswrapper[4631]: I1129 05:15:04.976673 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-9mfkk_9332abff-609f-424f-8f3c-b72b461489db/init/0.log" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.179501 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-9mfkk_9332abff-609f-424f-8f3c-b72b461489db/init/0.log" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.189574 4631 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-8g7mj_919eea43-11e7-42f0-8d23-e46e7cbc5359/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.205765 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-9mfkk_9332abff-609f-424f-8f3c-b72b461489db/dnsmasq-dns/0.log" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.227053 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9335343-399a-4ce5-ae0a-927f764b6d04" path="/var/lib/kubelet/pods/b9335343-399a-4ce5-ae0a-927f764b6d04/volumes" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.387508 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0b904c4b-913c-4fc2-8037-94300918d367/glance-log/0.log" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.391320 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0b904c4b-913c-4fc2-8037-94300918d367/glance-httpd/0.log" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.606729 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a356aef9-8e14-4c39-92b9-d32402e357ad/glance-httpd/0.log" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.624195 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a356aef9-8e14-4c39-92b9-d32402e357ad/glance-log/0.log" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.836069 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5964d597b6-rfcr2_e2a6410f-6c69-4b87-a247-b285aef98b71/horizon/1.log" Nov 29 05:15:05 crc kubenswrapper[4631]: I1129 05:15:05.936259 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5964d597b6-rfcr2_e2a6410f-6c69-4b87-a247-b285aef98b71/horizon/0.log" Nov 29 05:15:06 crc kubenswrapper[4631]: I1129 05:15:06.122896 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-j9xq2_d39ef35e-1420-489c-9637-c89eb39ba398/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:06 crc kubenswrapper[4631]: I1129 05:15:06.158330 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5964d597b6-rfcr2_e2a6410f-6c69-4b87-a247-b285aef98b71/horizon-log/0.log" Nov 29 05:15:06 crc kubenswrapper[4631]: I1129 05:15:06.292386 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-6jptz_cf5098d8-d84f-4749-87ad-6772a3ac8b4e/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:06 crc kubenswrapper[4631]: I1129 05:15:06.549599 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29406541-z5vp6_1f61da44-0694-4d79-b156-8ed5d0358a3f/keystone-cron/0.log" Nov 29 05:15:06 crc kubenswrapper[4631]: I1129 05:15:06.577599 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6ff47d4689-gnj7t_7771bd0b-e533-499d-9b8a-9071eb930e26/keystone-api/0.log" Nov 29 05:15:06 crc kubenswrapper[4631]: I1129 05:15:06.672870 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_c4c8b52f-72c8-4aac-9c57-df83ec5dfe20/kube-state-metrics/0.log" Nov 29 05:15:06 crc kubenswrapper[4631]: I1129 05:15:06.812466 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-vr72m_4c75d7a8-5a02-4b4b-8af4-e83e594a096f/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:07 crc kubenswrapper[4631]: I1129 05:15:07.129799 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7d7446b849-nsq65_7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9/neutron-httpd/0.log" Nov 29 05:15:07 crc kubenswrapper[4631]: I1129 05:15:07.158730 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7d7446b849-nsq65_7dae32b9-ddcb-4ea1-89b7-98dfb4f604e9/neutron-api/0.log" Nov 29 05:15:07 crc kubenswrapper[4631]: I1129 05:15:07.494982 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-vl6t8_62539716-e710-4274-a860-22590e2d5861/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:07 crc kubenswrapper[4631]: I1129 05:15:07.679751 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_52e9bc32-6412-4929-be28-61ac7021c100/nova-api-log/0.log" Nov 29 05:15:07 crc kubenswrapper[4631]: I1129 05:15:07.950925 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_2c6f9871-305a-473f-8610-475ad792012a/nova-cell0-conductor-conductor/0.log" Nov 29 05:15:08 crc kubenswrapper[4631]: I1129 05:15:08.088143 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_52e9bc32-6412-4929-be28-61ac7021c100/nova-api-api/0.log" Nov 29 05:15:08 crc kubenswrapper[4631]: I1129 05:15:08.117027 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_b334d5ab-4b3b-436f-bf43-54c2a6a511b0/nova-cell1-conductor-conductor/0.log" Nov 29 05:15:08 crc kubenswrapper[4631]: I1129 05:15:08.253746 4631 scope.go:117] "RemoveContainer" containerID="66c0429d8cff4c7754aa9bdff6b721c69598820e8acd549f8e5cf1b408a901f5" Nov 29 05:15:08 crc kubenswrapper[4631]: I1129 05:15:08.330682 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_462ee7da-139d-4cd7-91c2-6bb6b02f9b57/nova-cell1-novncproxy-novncproxy/0.log" Nov 29 05:15:08 crc kubenswrapper[4631]: I1129 05:15:08.409215 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-42rzz_4dc57e41-ec49-4fda-86a7-2d339d19003b/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:08 crc kubenswrapper[4631]: I1129 05:15:08.637377 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_a5610ba4-568f-4281-90cd-7b4a187a9884/nova-metadata-log/0.log" Nov 29 05:15:09 crc kubenswrapper[4631]: I1129 05:15:09.026744 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c9989c8e-3a12-49c9-89e0-d13778a4c3d4/mysql-bootstrap/0.log" Nov 29 05:15:09 crc kubenswrapper[4631]: I1129 05:15:09.121858 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_6926c23d-5598-420e-b328-c95ffe4d1475/nova-scheduler-scheduler/0.log" Nov 29 05:15:09 crc kubenswrapper[4631]: I1129 05:15:09.298706 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c9989c8e-3a12-49c9-89e0-d13778a4c3d4/mysql-bootstrap/0.log" Nov 29 05:15:09 crc kubenswrapper[4631]: I1129 05:15:09.321115 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-cell1-galera-0_c9989c8e-3a12-49c9-89e0-d13778a4c3d4/galera/0.log" Nov 29 05:15:09 crc kubenswrapper[4631]: I1129 05:15:09.524523 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_2197f066-a879-4131-9e49-4d188a01db93/mysql-bootstrap/0.log" Nov 29 05:15:09 crc kubenswrapper[4631]: I1129 05:15:09.675149 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_2197f066-a879-4131-9e49-4d188a01db93/mysql-bootstrap/0.log" Nov 29 05:15:09 crc kubenswrapper[4631]: I1129 05:15:09.733397 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_2197f066-a879-4131-9e49-4d188a01db93/galera/0.log" Nov 29 05:15:09 crc kubenswrapper[4631]: I1129 05:15:09.885921 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_55accadf-0ac2-4a6e-a640-6b47845f939f/openstackclient/0.log" Nov 29 05:15:10 crc kubenswrapper[4631]: I1129 05:15:10.095661 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-fc5cp_9dac72cc-94dd-4863-92c6-99296142fafb/ovn-controller/0.log" Nov 29 05:15:10 crc kubenswrapper[4631]: I1129 05:15:10.192363 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_a5610ba4-568f-4281-90cd-7b4a187a9884/nova-metadata-metadata/0.log" Nov 29 05:15:10 crc kubenswrapper[4631]: I1129 05:15:10.301784 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wc5tp_3f2faef4-4711-4b58-9dd3-ee5bfc76dfaf/openstack-network-exporter/0.log" Nov 29 05:15:10 crc kubenswrapper[4631]: I1129 05:15:10.468991 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kl2kj_1334e52e-4dbd-4c2d-bd05-d19f59ef722b/ovsdb-server-init/0.log" Nov 29 05:15:10 crc kubenswrapper[4631]: I1129 05:15:10.660861 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kl2kj_1334e52e-4dbd-4c2d-bd05-d19f59ef722b/ovsdb-server/0.log" Nov 29 05:15:10 crc kubenswrapper[4631]: I1129 05:15:10.669010 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kl2kj_1334e52e-4dbd-4c2d-bd05-d19f59ef722b/ovsdb-server-init/0.log" Nov 29 05:15:10 crc kubenswrapper[4631]: I1129 05:15:10.683037 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-kl2kj_1334e52e-4dbd-4c2d-bd05-d19f59ef722b/ovs-vswitchd/0.log" Nov 29 05:15:10 crc kubenswrapper[4631]: I1129 05:15:10.890090 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-msq8v_1cfdc5a8-cd45-4b20-8ad9-a95c8e16414d/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:10 crc kubenswrapper[4631]: I1129 05:15:10.987231 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7108e5ce-c50c-44e2-971f-9a22a4370b52/openstack-network-exporter/0.log" Nov 29 05:15:11 crc kubenswrapper[4631]: I1129 05:15:11.005183 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7108e5ce-c50c-44e2-971f-9a22a4370b52/ovn-northd/0.log" Nov 29 05:15:11 crc kubenswrapper[4631]: I1129 05:15:11.233766 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5bd78ee0-c12e-4d6b-a47d-3652c3150c8d/ovsdbserver-nb/0.log" Nov 29 05:15:11 crc kubenswrapper[4631]: I1129 05:15:11.271886 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-0_5bd78ee0-c12e-4d6b-a47d-3652c3150c8d/openstack-network-exporter/0.log" Nov 29 05:15:11 crc kubenswrapper[4631]: I1129 05:15:11.392878 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_c80339e5-63b2-451d-a7fb-25ef7a2fba6a/openstack-network-exporter/0.log" Nov 29 05:15:11 crc kubenswrapper[4631]: I1129 05:15:11.517453 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_c80339e5-63b2-451d-a7fb-25ef7a2fba6a/ovsdbserver-sb/0.log" Nov 29 05:15:11 crc kubenswrapper[4631]: I1129 05:15:11.696728 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6c8fbfb7d4-6m5ww_1d801d33-e580-4849-ab8b-6f2a21118b1f/placement-api/0.log" Nov 29 05:15:11 crc kubenswrapper[4631]: I1129 05:15:11.749211 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6c8fbfb7d4-6m5ww_1d801d33-e580-4849-ab8b-6f2a21118b1f/placement-log/0.log" Nov 29 05:15:11 crc kubenswrapper[4631]: I1129 05:15:11.877884 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_65e517ae-4586-44d4-b7d7-0f8f3f23e11f/setup-container/0.log" Nov 29 05:15:12 crc kubenswrapper[4631]: I1129 05:15:12.098621 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_65e517ae-4586-44d4-b7d7-0f8f3f23e11f/setup-container/0.log" Nov 29 05:15:12 crc kubenswrapper[4631]: I1129 05:15:12.210816 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_58d4504e-5552-4a9b-8ef8-6442b51cccf1/setup-container/0.log" Nov 29 05:15:12 crc kubenswrapper[4631]: I1129 05:15:12.225605 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_65e517ae-4586-44d4-b7d7-0f8f3f23e11f/rabbitmq/0.log" Nov 29 05:15:12 crc kubenswrapper[4631]: I1129 05:15:12.444464 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-zgb86_6b0158a9-97f1-46c7-a984-b1e6876d4b57/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:12 crc kubenswrapper[4631]: I1129 05:15:12.453529 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_58d4504e-5552-4a9b-8ef8-6442b51cccf1/rabbitmq/0.log" Nov 29 05:15:12 crc kubenswrapper[4631]: I1129 05:15:12.507610 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_58d4504e-5552-4a9b-8ef8-6442b51cccf1/setup-container/0.log" Nov 29 05:15:12 crc kubenswrapper[4631]: I1129 05:15:12.791107 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-tmj5d_811129e9-11e0-4619-b4ca-1779ce7ae461/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:12 crc kubenswrapper[4631]: I1129 05:15:12.803075 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-c777z_b9f1d0ab-6519-4eb8-aacc-539fb0e6433c/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.048677 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-gcswr_f32f1eab-b255-4518-ba67-d9a46362d4f7/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.092025 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-r2trh_25a7e95d-1575-46e8-9ab3-a21aa8f08b3d/ssh-known-hosts-edpm-deployment/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.409307 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-76cbc8bc95-pd9d4_dfe18059-91e5-40e0-a1df-f5f56cf4c0d2/proxy-server/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.448113 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-76cbc8bc95-pd9d4_dfe18059-91e5-40e0-a1df-f5f56cf4c0d2/proxy-httpd/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.599925 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-s8gk2_cfd7f275-e7d1-4239-b55a-b0566664e6bf/swift-ring-rebalance/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.752456 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/account-auditor/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.826347 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/account-reaper/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.871817 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/account-replicator/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.973531 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/container-auditor/0.log" Nov 29 05:15:13 crc kubenswrapper[4631]: I1129 05:15:13.979676 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/account-server/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.087821 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/container-replicator/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.259966 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/container-updater/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.269507 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/container-server/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.310788 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-auditor/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.341667 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-expirer/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.475034 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-server/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.507132 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-updater/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.554108 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/object-replicator/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.573506 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/rsync/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.684020 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_874bb6b3-16cb-4d17-bf8b-6d3593d727d0/swift-recon-cron/0.log" Nov 29 05:15:14 crc kubenswrapper[4631]: I1129 05:15:14.869356 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-cjxtc_38877ce3-8e06-44be-9be6-4abb374c32fa/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:15 crc kubenswrapper[4631]: I1129 05:15:15.197085 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_cc72d19d-c3cb-4f95-9dc2-27f017d8eaeb/tempest-tests-tempest-tests-runner/0.log" Nov 29 05:15:15 crc kubenswrapper[4631]: I1129 05:15:15.339509 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_446545e5-fff2-4f7e-960c-26bf33c121bc/test-operator-logs-container/0.log" Nov 29 05:15:15 crc kubenswrapper[4631]: I1129 05:15:15.475439 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-dpxdn_b4ca6673-e7b6-4f1c-b3ec-edb6b7451d93/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 29 05:15:20 crc kubenswrapper[4631]: I1129 05:15:20.715571 4631 patch_prober.go:28] interesting pod/machine-config-daemon-6bmtd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 29 05:15:20 crc kubenswrapper[4631]: I1129 05:15:20.716033 4631 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 29 05:15:20 crc kubenswrapper[4631]: I1129 05:15:20.716077 4631 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" Nov 29 05:15:20 crc kubenswrapper[4631]: I1129 05:15:20.716807 4631 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8"} pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 29 05:15:20 crc kubenswrapper[4631]: I1129 05:15:20.716850 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerName="machine-config-daemon" containerID="cri-o://5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" gracePeriod=600 Nov 29 05:15:20 crc kubenswrapper[4631]: E1129 05:15:20.849897 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:15:21 crc kubenswrapper[4631]: I1129 05:15:21.000462 4631 generic.go:334] "Generic (PLEG): container finished" podID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" exitCode=0 Nov 29 05:15:21 crc kubenswrapper[4631]: I1129 05:15:21.000505 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerDied","Data":"5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8"} Nov 29 05:15:21 crc kubenswrapper[4631]: I1129 05:15:21.000538 4631 scope.go:117] "RemoveContainer" containerID="b6c56a99240518977d61f2c7d5677497c6ecb3a6e851512ce81f757de601d59d" Nov 29 05:15:21 crc kubenswrapper[4631]: I1129 05:15:21.001211 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:15:21 crc kubenswrapper[4631]: E1129 05:15:21.001488 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:15:24 crc kubenswrapper[4631]: I1129 05:15:24.368553 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_a706cd5e-48d4-44a0-b8f5-b97ac5e39a16/memcached/0.log" Nov 29 05:15:34 crc kubenswrapper[4631]: I1129 05:15:34.217018 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:15:34 crc kubenswrapper[4631]: E1129 05:15:34.218078 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:15:42 crc kubenswrapper[4631]: I1129 05:15:42.554352 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/util/0.log" Nov 29 05:15:42 crc kubenswrapper[4631]: I1129 05:15:42.690740 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/util/0.log" Nov 29 05:15:42 crc kubenswrapper[4631]: I1129 05:15:42.706399 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/pull/0.log" Nov 29 05:15:42 crc kubenswrapper[4631]: I1129 05:15:42.753574 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/pull/0.log" Nov 29 05:15:42 crc kubenswrapper[4631]: I1129 05:15:42.896651 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/pull/0.log" Nov 29 05:15:42 crc kubenswrapper[4631]: I1129 05:15:42.916355 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/util/0.log" Nov 29 05:15:42 crc kubenswrapper[4631]: I1129 05:15:42.967170 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_76e278c9c6e53d228e0007182ebb1a9ffdd2f8044cc4b07fff1d3b703199922_0cb55e1f-fe62-4278-8b61-a5836e3a6946/extract/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.143910 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-dp4b9_51f70cd4-a679-426f-9467-1702bb980ada/manager/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.169914 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-dp4b9_51f70cd4-a679-426f-9467-1702bb980ada/kube-rbac-proxy/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.181057 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-q4tvc_75e40b24-8291-44fe-bd37-97d493e2c136/kube-rbac-proxy/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.398467 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-q4tvc_75e40b24-8291-44fe-bd37-97d493e2c136/manager/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.402380 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-kzfsd_fc3a4db2-6980-4bc4-aa20-8340eecc513e/manager/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.445245 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-kzfsd_fc3a4db2-6980-4bc4-aa20-8340eecc513e/kube-rbac-proxy/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.549909 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-md78v_7214fe12-0140-464c-a856-b1b5482bb635/kube-rbac-proxy/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.668322 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-md78v_7214fe12-0140-464c-a856-b1b5482bb635/manager/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.772868 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-h8x8k_5fc71d02-38a2-4998-8cab-e334a10fcd5c/kube-rbac-proxy/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.824053 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-h8x8k_5fc71d02-38a2-4998-8cab-e334a10fcd5c/manager/0.log" Nov 29 05:15:43 crc kubenswrapper[4631]: I1129 05:15:43.907625 
4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-5jzhg_1ec069eb-26b3-408c-a4ba-118d01436ecd/kube-rbac-proxy/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.001797 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-5jzhg_1ec069eb-26b3-408c-a4ba-118d01436ecd/manager/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.073557 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-zhzc9_7e78b781-84b7-4915-837a-ed1a45d1201e/kube-rbac-proxy/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.276722 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-zhzc9_7e78b781-84b7-4915-837a-ed1a45d1201e/manager/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.293703 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-qp8p2_53a59934-39b4-4b0b-bf3d-da06f41ccf7f/manager/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.324944 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-qp8p2_53a59934-39b4-4b0b-bf3d-da06f41ccf7f/kube-rbac-proxy/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.546180 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-6c69d4788d-4q485_a0087618-94aa-4b5f-a590-9e976a84cbbf/kube-rbac-proxy/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.554257 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-6c69d4788d-4q485_a0087618-94aa-4b5f-a590-9e976a84cbbf/manager/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.741283 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-96kdd_46dcc222-f54d-4ddd-bc12-71fd2cfc989c/manager/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.774655 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-96kdd_46dcc222-f54d-4ddd-bc12-71fd2cfc989c/kube-rbac-proxy/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.913289 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-plz68_f4f6e611-da9a-42cb-99f8-59b9784b2671/kube-rbac-proxy/0.log" Nov 29 05:15:44 crc kubenswrapper[4631]: I1129 05:15:44.929105 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-plz68_f4f6e611-da9a-42cb-99f8-59b9784b2671/manager/0.log" Nov 29 05:15:45 crc kubenswrapper[4631]: I1129 05:15:45.034747 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-lp8wb_9429868a-7e85-4c45-a3ff-e05af34c9854/kube-rbac-proxy/0.log" Nov 29 05:15:45 crc kubenswrapper[4631]: I1129 05:15:45.138246 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-lp8wb_9429868a-7e85-4c45-a3ff-e05af34c9854/manager/0.log" Nov 29 05:15:45 crc 
kubenswrapper[4631]: I1129 05:15:45.250981 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-6mrwx_01ca8f91-4e45-4bb2-a44f-a17d6701e529/kube-rbac-proxy/0.log" Nov 29 05:15:45 crc kubenswrapper[4631]: I1129 05:15:45.269715 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-6mrwx_01ca8f91-4e45-4bb2-a44f-a17d6701e529/manager/0.log" Nov 29 05:15:45 crc kubenswrapper[4631]: I1129 05:15:45.423393 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-qm568_f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb/kube-rbac-proxy/0.log" Nov 29 05:15:45 crc kubenswrapper[4631]: I1129 05:15:45.551314 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-qm568_f42d6bcb-b085-4dae-acbc-bdc8cd80c5bb/manager/0.log" Nov 29 05:15:45 crc kubenswrapper[4631]: I1129 05:15:45.615189 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf_072818bb-f7b6-4dbc-9885-a3a8c68f9494/kube-rbac-proxy/0.log" Nov 29 05:15:45 crc kubenswrapper[4631]: I1129 05:15:45.635299 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4zn8zf_072818bb-f7b6-4dbc-9885-a3a8c68f9494/manager/0.log" Nov 29 05:15:46 crc kubenswrapper[4631]: I1129 05:15:46.084345 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5478ff79b4-nggr2_d0f57542-df49-4195-91b5-1fc784cba518/operator/0.log" Nov 29 05:15:46 crc kubenswrapper[4631]: I1129 05:15:46.258323 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-4wzxd_d04d138a-44a6-4666-aa58-8a225d975235/registry-server/0.log" Nov 29 05:15:46 crc kubenswrapper[4631]: I1129 05:15:46.427315 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-j9s5g_82eecaa9-4289-4d37-b953-7c2de1f5a437/kube-rbac-proxy/0.log" Nov 29 05:15:46 crc kubenswrapper[4631]: I1129 05:15:46.548420 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-5xmrz_9812178e-08d5-487d-b42e-1edcca79850b/kube-rbac-proxy/0.log" Nov 29 05:15:46 crc kubenswrapper[4631]: I1129 05:15:46.562047 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-j9s5g_82eecaa9-4289-4d37-b953-7c2de1f5a437/manager/0.log" Nov 29 05:15:46 crc kubenswrapper[4631]: I1129 05:15:46.698807 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-5xmrz_9812178e-08d5-487d-b42e-1edcca79850b/manager/0.log" Nov 29 05:15:46 crc kubenswrapper[4631]: I1129 05:15:46.851580 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-79cbf6968-9cwcq_cb8f6e48-60ac-497b-ab0a-8d556f77a1ce/manager/0.log" Nov 29 05:15:46 crc kubenswrapper[4631]: I1129 05:15:46.862202 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-97pk4_99a5846d-1348-421d-9637-cbd86e552f1c/operator/0.log" Nov 29 
05:15:46 crc kubenswrapper[4631]: I1129 05:15:46.961344 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-rgxmh_7956b653-1bf2-4bec-8246-0a806ef0716d/kube-rbac-proxy/0.log" Nov 29 05:15:47 crc kubenswrapper[4631]: I1129 05:15:47.093767 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-rgxmh_7956b653-1bf2-4bec-8246-0a806ef0716d/manager/0.log" Nov 29 05:15:47 crc kubenswrapper[4631]: I1129 05:15:47.150082 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-spqmt_d1f8729d-3838-42f0-9185-6b4edb74a90f/kube-rbac-proxy/0.log" Nov 29 05:15:47 crc kubenswrapper[4631]: I1129 05:15:47.224957 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-spqmt_d1f8729d-3838-42f0-9185-6b4edb74a90f/manager/0.log" Nov 29 05:15:47 crc kubenswrapper[4631]: I1129 05:15:47.365646 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-7kqbn_fa9bc4b5-9bea-48a5-8d01-1f2cd1957133/kube-rbac-proxy/0.log" Nov 29 05:15:47 crc kubenswrapper[4631]: I1129 05:15:47.425813 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-7kqbn_fa9bc4b5-9bea-48a5-8d01-1f2cd1957133/manager/0.log" Nov 29 05:15:47 crc kubenswrapper[4631]: I1129 05:15:47.490097 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-mns7w_ae831fd7-f5a8-4427-a3c7-64ae0a86281f/kube-rbac-proxy/0.log" Nov 29 05:15:47 crc kubenswrapper[4631]: I1129 05:15:47.500997 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-mns7w_ae831fd7-f5a8-4427-a3c7-64ae0a86281f/manager/0.log" Nov 29 05:15:48 crc kubenswrapper[4631]: I1129 05:15:48.216842 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:15:48 crc kubenswrapper[4631]: E1129 05:15:48.217123 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:16:02 crc kubenswrapper[4631]: I1129 05:16:02.216494 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:16:02 crc kubenswrapper[4631]: E1129 05:16:02.217108 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:16:07 crc kubenswrapper[4631]: I1129 05:16:07.659910 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-x8t8v_8361abea-c6bc-4927-a88b-c8318096d60d/control-plane-machine-set-operator/0.log" Nov 29 05:16:07 crc kubenswrapper[4631]: I1129 05:16:07.787175 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-txv66_5e9aad10-398a-479a-b828-100682ad67c7/kube-rbac-proxy/0.log" Nov 29 05:16:07 crc kubenswrapper[4631]: I1129 05:16:07.808592 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-txv66_5e9aad10-398a-479a-b828-100682ad67c7/machine-api-operator/0.log" Nov 29 05:16:12 crc kubenswrapper[4631]: I1129 05:16:12.846501 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l9fff"] Nov 29 05:16:12 crc kubenswrapper[4631]: E1129 05:16:12.847591 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e270d47-fd91-4cfd-a49b-54ab08fa0f03" containerName="collect-profiles" Nov 29 05:16:12 crc kubenswrapper[4631]: I1129 05:16:12.847608 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e270d47-fd91-4cfd-a49b-54ab08fa0f03" containerName="collect-profiles" Nov 29 05:16:12 crc kubenswrapper[4631]: I1129 05:16:12.847881 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e270d47-fd91-4cfd-a49b-54ab08fa0f03" containerName="collect-profiles" Nov 29 05:16:12 crc kubenswrapper[4631]: I1129 05:16:12.850185 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:12 crc kubenswrapper[4631]: I1129 05:16:12.873430 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l9fff"] Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.014887 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-catalog-content\") pod \"redhat-operators-l9fff\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.021571 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-utilities\") pod \"redhat-operators-l9fff\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.022089 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-248kq\" (UniqueName: \"kubernetes.io/projected/403cf8e3-4815-4c30-bb96-685f72ab31b5-kube-api-access-248kq\") pod \"redhat-operators-l9fff\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.123943 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-248kq\" (UniqueName: \"kubernetes.io/projected/403cf8e3-4815-4c30-bb96-685f72ab31b5-kube-api-access-248kq\") pod \"redhat-operators-l9fff\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.124006 4631 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-catalog-content\") pod \"redhat-operators-l9fff\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.124030 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-utilities\") pod \"redhat-operators-l9fff\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.124473 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-utilities\") pod \"redhat-operators-l9fff\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.124693 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-catalog-content\") pod \"redhat-operators-l9fff\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.144754 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-248kq\" (UniqueName: \"kubernetes.io/projected/403cf8e3-4815-4c30-bb96-685f72ab31b5-kube-api-access-248kq\") pod \"redhat-operators-l9fff\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.194365 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:13 crc kubenswrapper[4631]: I1129 05:16:13.729124 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l9fff"] Nov 29 05:16:14 crc kubenswrapper[4631]: I1129 05:16:14.490891 4631 generic.go:334] "Generic (PLEG): container finished" podID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerID="ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634" exitCode=0 Nov 29 05:16:14 crc kubenswrapper[4631]: I1129 05:16:14.490971 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9fff" event={"ID":"403cf8e3-4815-4c30-bb96-685f72ab31b5","Type":"ContainerDied","Data":"ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634"} Nov 29 05:16:14 crc kubenswrapper[4631]: I1129 05:16:14.497544 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9fff" event={"ID":"403cf8e3-4815-4c30-bb96-685f72ab31b5","Type":"ContainerStarted","Data":"bfcda62ab6019bad2bee27e910e765d68314640438a1d0e30b26d7f0c4328b96"} Nov 29 05:16:15 crc kubenswrapper[4631]: I1129 05:16:15.506792 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9fff" event={"ID":"403cf8e3-4815-4c30-bb96-685f72ab31b5","Type":"ContainerStarted","Data":"950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d"} Nov 29 05:16:16 crc kubenswrapper[4631]: I1129 05:16:16.216655 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:16:16 crc kubenswrapper[4631]: E1129 05:16:16.217411 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:16:19 crc kubenswrapper[4631]: I1129 05:16:19.545697 4631 generic.go:334] "Generic (PLEG): container finished" podID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerID="950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d" exitCode=0 Nov 29 05:16:19 crc kubenswrapper[4631]: I1129 05:16:19.545763 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9fff" event={"ID":"403cf8e3-4815-4c30-bb96-685f72ab31b5","Type":"ContainerDied","Data":"950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d"} Nov 29 05:16:20 crc kubenswrapper[4631]: I1129 05:16:20.555265 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9fff" event={"ID":"403cf8e3-4815-4c30-bb96-685f72ab31b5","Type":"ContainerStarted","Data":"656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95"} Nov 29 05:16:20 crc kubenswrapper[4631]: I1129 05:16:20.577355 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-l9fff" podStartSLOduration=2.918406518 podStartE2EDuration="8.577339547s" podCreationTimestamp="2025-11-29 05:16:12 +0000 UTC" firstStartedPulling="2025-11-29 05:16:14.493237365 +0000 UTC m=+3901.557740879" lastFinishedPulling="2025-11-29 05:16:20.152170394 +0000 UTC m=+3907.216673908" observedRunningTime="2025-11-29 05:16:20.571399441 +0000 
UTC m=+3907.635902955" watchObservedRunningTime="2025-11-29 05:16:20.577339547 +0000 UTC m=+3907.641843061" Nov 29 05:16:21 crc kubenswrapper[4631]: I1129 05:16:21.726318 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-5gqxn_82882099-8669-4d95-a03f-7da7a69b3865/cert-manager-controller/0.log" Nov 29 05:16:21 crc kubenswrapper[4631]: I1129 05:16:21.829664 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-n4mtg_9f4942a9-5091-4ae9-b7ba-9e9aa329161f/cert-manager-cainjector/0.log" Nov 29 05:16:21 crc kubenswrapper[4631]: I1129 05:16:21.941241 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-66b6l_1cff1f89-8342-4b11-98a6-c6d2cb2bed76/cert-manager-webhook/0.log" Nov 29 05:16:23 crc kubenswrapper[4631]: I1129 05:16:23.195201 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:23 crc kubenswrapper[4631]: I1129 05:16:23.195256 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:24 crc kubenswrapper[4631]: I1129 05:16:24.239682 4631 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-l9fff" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerName="registry-server" probeResult="failure" output=< Nov 29 05:16:24 crc kubenswrapper[4631]: timeout: failed to connect service ":50051" within 1s Nov 29 05:16:24 crc kubenswrapper[4631]: > Nov 29 05:16:30 crc kubenswrapper[4631]: I1129 05:16:30.216523 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:16:30 crc kubenswrapper[4631]: E1129 05:16:30.217193 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:16:33 crc kubenswrapper[4631]: I1129 05:16:33.266272 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:33 crc kubenswrapper[4631]: I1129 05:16:33.361048 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:33 crc kubenswrapper[4631]: I1129 05:16:33.516135 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l9fff"] Nov 29 05:16:34 crc kubenswrapper[4631]: I1129 05:16:34.671295 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-l9fff" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerName="registry-server" containerID="cri-o://656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95" gracePeriod=2 Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.108653 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.146674 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-utilities\") pod \"403cf8e3-4815-4c30-bb96-685f72ab31b5\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.146780 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-catalog-content\") pod \"403cf8e3-4815-4c30-bb96-685f72ab31b5\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.146961 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-248kq\" (UniqueName: \"kubernetes.io/projected/403cf8e3-4815-4c30-bb96-685f72ab31b5-kube-api-access-248kq\") pod \"403cf8e3-4815-4c30-bb96-685f72ab31b5\" (UID: \"403cf8e3-4815-4c30-bb96-685f72ab31b5\") " Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.147458 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-utilities" (OuterVolumeSpecName: "utilities") pod "403cf8e3-4815-4c30-bb96-685f72ab31b5" (UID: "403cf8e3-4815-4c30-bb96-685f72ab31b5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.154532 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/403cf8e3-4815-4c30-bb96-685f72ab31b5-kube-api-access-248kq" (OuterVolumeSpecName: "kube-api-access-248kq") pod "403cf8e3-4815-4c30-bb96-685f72ab31b5" (UID: "403cf8e3-4815-4c30-bb96-685f72ab31b5"). InnerVolumeSpecName "kube-api-access-248kq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.247296 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "403cf8e3-4815-4c30-bb96-685f72ab31b5" (UID: "403cf8e3-4815-4c30-bb96-685f72ab31b5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.248913 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.248950 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-248kq\" (UniqueName: \"kubernetes.io/projected/403cf8e3-4815-4c30-bb96-685f72ab31b5-kube-api-access-248kq\") on node \"crc\" DevicePath \"\"" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.248965 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/403cf8e3-4815-4c30-bb96-685f72ab31b5-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.680665 4631 generic.go:334] "Generic (PLEG): container finished" podID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerID="656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95" exitCode=0 Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.681358 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9fff" event={"ID":"403cf8e3-4815-4c30-bb96-685f72ab31b5","Type":"ContainerDied","Data":"656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95"} Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.681432 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9fff" event={"ID":"403cf8e3-4815-4c30-bb96-685f72ab31b5","Type":"ContainerDied","Data":"bfcda62ab6019bad2bee27e910e765d68314640438a1d0e30b26d7f0c4328b96"} Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.681492 4631 scope.go:117] "RemoveContainer" containerID="656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.681671 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l9fff" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.706849 4631 scope.go:117] "RemoveContainer" containerID="950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.715355 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l9fff"] Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.726215 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-l9fff"] Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.853873 4631 scope.go:117] "RemoveContainer" containerID="ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.895795 4631 scope.go:117] "RemoveContainer" containerID="656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95" Nov 29 05:16:35 crc kubenswrapper[4631]: E1129 05:16:35.896160 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95\": container with ID starting with 656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95 not found: ID does not exist" containerID="656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.896198 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95"} err="failed to get container status \"656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95\": rpc error: code = NotFound desc = could not find container \"656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95\": container with ID starting with 656bc00b1a96db379cdef98b3c050a336c50cfb3bccc757ef9dcf6d347387a95 not found: ID does not exist" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.896237 4631 scope.go:117] "RemoveContainer" containerID="950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d" Nov 29 05:16:35 crc kubenswrapper[4631]: E1129 05:16:35.897613 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d\": container with ID starting with 950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d not found: ID does not exist" containerID="950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.897654 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d"} err="failed to get container status \"950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d\": rpc error: code = NotFound desc = could not find container \"950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d\": container with ID starting with 950f19d489ce8d595e9bb97d2ec9be24ac11004157339e8dd494912ead2ab96d not found: ID does not exist" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.897683 4631 scope.go:117] "RemoveContainer" containerID="ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634" Nov 29 05:16:35 crc kubenswrapper[4631]: E1129 05:16:35.899126 4631 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634\": container with ID starting with ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634 not found: ID does not exist" containerID="ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634" Nov 29 05:16:35 crc kubenswrapper[4631]: I1129 05:16:35.899165 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634"} err="failed to get container status \"ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634\": rpc error: code = NotFound desc = could not find container \"ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634\": container with ID starting with ba30b722747ca2a60577f7ac9cb7dd653f30a231c354f5486acce902cd4be634 not found: ID does not exist" Nov 29 05:16:37 crc kubenswrapper[4631]: I1129 05:16:37.228027 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" path="/var/lib/kubelet/pods/403cf8e3-4815-4c30-bb96-685f72ab31b5/volumes" Nov 29 05:16:39 crc kubenswrapper[4631]: I1129 05:16:39.227504 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-tlptj_09679895-91f4-4a46-a9cb-03bb2bd32537/nmstate-console-plugin/0.log" Nov 29 05:16:39 crc kubenswrapper[4631]: I1129 05:16:39.417834 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-dmhbd_fbd8e3f0-5167-4724-8f85-8648acdb3f6b/nmstate-handler/0.log" Nov 29 05:16:39 crc kubenswrapper[4631]: I1129 05:16:39.489027 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-ncfds_f1476f66-a509-4f8a-937f-b7b5e906a7e2/kube-rbac-proxy/0.log" Nov 29 05:16:39 crc kubenswrapper[4631]: I1129 05:16:39.511401 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-ncfds_f1476f66-a509-4f8a-937f-b7b5e906a7e2/nmstate-metrics/0.log" Nov 29 05:16:39 crc kubenswrapper[4631]: I1129 05:16:39.675375 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-jrhjf_7183e04c-6dbe-4139-8bd2-a217adae2ab6/nmstate-operator/0.log" Nov 29 05:16:39 crc kubenswrapper[4631]: I1129 05:16:39.724211 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-xjsng_a232adff-6fb5-4120-8f41-8bf310322024/nmstate-webhook/0.log" Nov 29 05:16:42 crc kubenswrapper[4631]: I1129 05:16:42.216255 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:16:42 crc kubenswrapper[4631]: E1129 05:16:42.216969 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.425460 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9xhr7"] Nov 29 05:16:52 crc kubenswrapper[4631]: E1129 05:16:52.426975 4631 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerName="extract-content" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.427055 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerName="extract-content" Nov 29 05:16:52 crc kubenswrapper[4631]: E1129 05:16:52.427126 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerName="registry-server" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.427174 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerName="registry-server" Nov 29 05:16:52 crc kubenswrapper[4631]: E1129 05:16:52.427243 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerName="extract-utilities" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.427291 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerName="extract-utilities" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.427545 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="403cf8e3-4815-4c30-bb96-685f72ab31b5" containerName="registry-server" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.428926 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.449467 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9xhr7"] Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.505110 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bt9fg\" (UniqueName: \"kubernetes.io/projected/1284f723-5943-4ff5-8ba8-f4f8e479ac19-kube-api-access-bt9fg\") pod \"community-operators-9xhr7\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.505487 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-utilities\") pod \"community-operators-9xhr7\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.505649 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-catalog-content\") pod \"community-operators-9xhr7\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.612852 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bt9fg\" (UniqueName: \"kubernetes.io/projected/1284f723-5943-4ff5-8ba8-f4f8e479ac19-kube-api-access-bt9fg\") pod \"community-operators-9xhr7\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.612938 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-utilities\") pod \"community-operators-9xhr7\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.612982 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-catalog-content\") pod \"community-operators-9xhr7\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.613625 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-catalog-content\") pod \"community-operators-9xhr7\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.613775 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-utilities\") pod \"community-operators-9xhr7\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.646917 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bt9fg\" (UniqueName: \"kubernetes.io/projected/1284f723-5943-4ff5-8ba8-f4f8e479ac19-kube-api-access-bt9fg\") pod \"community-operators-9xhr7\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:52 crc kubenswrapper[4631]: I1129 05:16:52.753749 4631 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:16:53 crc kubenswrapper[4631]: I1129 05:16:53.223867 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:16:53 crc kubenswrapper[4631]: E1129 05:16:53.224553 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:16:53 crc kubenswrapper[4631]: I1129 05:16:53.367853 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9xhr7"] Nov 29 05:16:53 crc kubenswrapper[4631]: I1129 05:16:53.830592 4631 generic.go:334] "Generic (PLEG): container finished" podID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerID="014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b" exitCode=0 Nov 29 05:16:53 crc kubenswrapper[4631]: I1129 05:16:53.830669 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9xhr7" event={"ID":"1284f723-5943-4ff5-8ba8-f4f8e479ac19","Type":"ContainerDied","Data":"014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b"} Nov 29 05:16:53 crc kubenswrapper[4631]: I1129 05:16:53.832680 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9xhr7" event={"ID":"1284f723-5943-4ff5-8ba8-f4f8e479ac19","Type":"ContainerStarted","Data":"8e01aa12bf462edd9a16b229f535889bb5082bc5e9ef282e5ca0981316a4b770"} Nov 29 05:16:55 crc kubenswrapper[4631]: I1129 05:16:55.863543 4631 generic.go:334] "Generic (PLEG): container finished" podID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerID="0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5" exitCode=0 Nov 29 05:16:55 crc kubenswrapper[4631]: I1129 05:16:55.863828 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9xhr7" event={"ID":"1284f723-5943-4ff5-8ba8-f4f8e479ac19","Type":"ContainerDied","Data":"0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5"} Nov 29 05:16:56 crc kubenswrapper[4631]: I1129 05:16:56.873834 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9xhr7" event={"ID":"1284f723-5943-4ff5-8ba8-f4f8e479ac19","Type":"ContainerStarted","Data":"13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65"} Nov 29 05:16:56 crc kubenswrapper[4631]: I1129 05:16:56.895424 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9xhr7" podStartSLOduration=2.227243825 podStartE2EDuration="4.895406491s" podCreationTimestamp="2025-11-29 05:16:52 +0000 UTC" firstStartedPulling="2025-11-29 05:16:53.832300963 +0000 UTC m=+3940.896804477" lastFinishedPulling="2025-11-29 05:16:56.500463629 +0000 UTC m=+3943.564967143" observedRunningTime="2025-11-29 05:16:56.891547416 +0000 UTC m=+3943.956050930" watchObservedRunningTime="2025-11-29 05:16:56.895406491 +0000 UTC m=+3943.959910005" Nov 29 05:16:58 crc kubenswrapper[4631]: I1129 05:16:58.689695 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_controller-f8648f98b-nbzls_40e9d6d5-fd7e-4962-aace-e0fe711eb77d/kube-rbac-proxy/0.log" Nov 29 05:16:58 crc kubenswrapper[4631]: I1129 05:16:58.897585 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-nbzls_40e9d6d5-fd7e-4962-aace-e0fe711eb77d/controller/0.log" Nov 29 05:16:58 crc kubenswrapper[4631]: I1129 05:16:58.932230 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-ktl8z_7d0c7592-e989-41cd-b1bb-4ec52e694973/frr-k8s-webhook-server/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.145418 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-frr-files/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.383847 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-metrics/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.404145 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-frr-files/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.431280 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-reloader/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.480150 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-reloader/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.664310 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-frr-files/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.770034 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-metrics/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.781807 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-metrics/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.800037 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-reloader/0.log" Nov 29 05:16:59 crc kubenswrapper[4631]: I1129 05:16:59.943082 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-frr-files/0.log" Nov 29 05:17:00 crc kubenswrapper[4631]: I1129 05:17:00.001069 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-reloader/0.log" Nov 29 05:17:00 crc kubenswrapper[4631]: I1129 05:17:00.002280 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/controller/0.log" Nov 29 05:17:00 crc kubenswrapper[4631]: I1129 05:17:00.008949 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/cp-metrics/0.log" Nov 29 05:17:00 crc kubenswrapper[4631]: I1129 05:17:00.238756 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/kube-rbac-proxy/0.log" Nov 29 05:17:00 crc kubenswrapper[4631]: I1129 05:17:00.317682 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/kube-rbac-proxy-frr/0.log" Nov 29 05:17:00 crc kubenswrapper[4631]: I1129 05:17:00.318500 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/frr-metrics/0.log" Nov 29 05:17:00 crc kubenswrapper[4631]: I1129 05:17:00.531965 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/reloader/0.log" Nov 29 05:17:00 crc kubenswrapper[4631]: I1129 05:17:00.594687 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-86d54cd4bb-mfrnm_b9fde2d9-7731-458b-9e00-216a755d629c/manager/0.log" Nov 29 05:17:01 crc kubenswrapper[4631]: I1129 05:17:01.261160 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zpqkq_67c6fb55-67d7-4d86-8071-1a5e2d13e338/frr/0.log" Nov 29 05:17:01 crc kubenswrapper[4631]: I1129 05:17:01.372573 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-8598fb747-q99bx_e1c922f9-f18e-4d4c-a1ff-b5f7a8bde9ab/webhook-server/0.log" Nov 29 05:17:01 crc kubenswrapper[4631]: I1129 05:17:01.653688 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-rjtb7_b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d/kube-rbac-proxy/0.log" Nov 29 05:17:01 crc kubenswrapper[4631]: I1129 05:17:01.885000 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-rjtb7_b0c23c6a-ceeb-4f3a-b7d6-a7122944a96d/speaker/0.log" Nov 29 05:17:02 crc kubenswrapper[4631]: I1129 05:17:02.754402 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:17:02 crc kubenswrapper[4631]: I1129 05:17:02.754491 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:17:02 crc kubenswrapper[4631]: I1129 05:17:02.816891 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:17:02 crc kubenswrapper[4631]: I1129 05:17:02.965935 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:17:03 crc kubenswrapper[4631]: I1129 05:17:03.046572 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9xhr7"] Nov 29 05:17:04 crc kubenswrapper[4631]: I1129 05:17:04.937726 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9xhr7" podUID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerName="registry-server" containerID="cri-o://13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65" gracePeriod=2 Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.710855 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.774916 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bt9fg\" (UniqueName: \"kubernetes.io/projected/1284f723-5943-4ff5-8ba8-f4f8e479ac19-kube-api-access-bt9fg\") pod \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.774970 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-utilities\") pod \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.775180 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-catalog-content\") pod \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\" (UID: \"1284f723-5943-4ff5-8ba8-f4f8e479ac19\") " Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.775768 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-utilities" (OuterVolumeSpecName: "utilities") pod "1284f723-5943-4ff5-8ba8-f4f8e479ac19" (UID: "1284f723-5943-4ff5-8ba8-f4f8e479ac19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.783619 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1284f723-5943-4ff5-8ba8-f4f8e479ac19-kube-api-access-bt9fg" (OuterVolumeSpecName: "kube-api-access-bt9fg") pod "1284f723-5943-4ff5-8ba8-f4f8e479ac19" (UID: "1284f723-5943-4ff5-8ba8-f4f8e479ac19"). InnerVolumeSpecName "kube-api-access-bt9fg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.823084 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1284f723-5943-4ff5-8ba8-f4f8e479ac19" (UID: "1284f723-5943-4ff5-8ba8-f4f8e479ac19"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.877571 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bt9fg\" (UniqueName: \"kubernetes.io/projected/1284f723-5943-4ff5-8ba8-f4f8e479ac19-kube-api-access-bt9fg\") on node \"crc\" DevicePath \"\"" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.877610 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.877623 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1284f723-5943-4ff5-8ba8-f4f8e479ac19-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.950019 4631 generic.go:334] "Generic (PLEG): container finished" podID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerID="13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65" exitCode=0 Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.950074 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9xhr7" event={"ID":"1284f723-5943-4ff5-8ba8-f4f8e479ac19","Type":"ContainerDied","Data":"13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65"} Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.950106 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9xhr7" event={"ID":"1284f723-5943-4ff5-8ba8-f4f8e479ac19","Type":"ContainerDied","Data":"8e01aa12bf462edd9a16b229f535889bb5082bc5e9ef282e5ca0981316a4b770"} Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.950127 4631 scope.go:117] "RemoveContainer" containerID="13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.950320 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9xhr7" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.979452 4631 scope.go:117] "RemoveContainer" containerID="0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5" Nov 29 05:17:05 crc kubenswrapper[4631]: I1129 05:17:05.993246 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9xhr7"] Nov 29 05:17:06 crc kubenswrapper[4631]: I1129 05:17:06.001437 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9xhr7"] Nov 29 05:17:06 crc kubenswrapper[4631]: I1129 05:17:06.022015 4631 scope.go:117] "RemoveContainer" containerID="014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b" Nov 29 05:17:06 crc kubenswrapper[4631]: I1129 05:17:06.051479 4631 scope.go:117] "RemoveContainer" containerID="13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65" Nov 29 05:17:06 crc kubenswrapper[4631]: E1129 05:17:06.053149 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65\": container with ID starting with 13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65 not found: ID does not exist" containerID="13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65" Nov 29 05:17:06 crc kubenswrapper[4631]: I1129 05:17:06.053183 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65"} err="failed to get container status \"13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65\": rpc error: code = NotFound desc = could not find container \"13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65\": container with ID starting with 13b12bbc10a92504b3683247ea7851a2d83151851d692dbb64d5f2357e393b65 not found: ID does not exist" Nov 29 05:17:06 crc kubenswrapper[4631]: I1129 05:17:06.053204 4631 scope.go:117] "RemoveContainer" containerID="0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5" Nov 29 05:17:06 crc kubenswrapper[4631]: E1129 05:17:06.053748 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5\": container with ID starting with 0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5 not found: ID does not exist" containerID="0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5" Nov 29 05:17:06 crc kubenswrapper[4631]: I1129 05:17:06.053795 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5"} err="failed to get container status \"0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5\": rpc error: code = NotFound desc = could not find container \"0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5\": container with ID starting with 0aedb6d56f350f0f2e30caa3c9975eef60a612b864f5191dfbee41accde71ac5 not found: ID does not exist" Nov 29 05:17:06 crc kubenswrapper[4631]: I1129 05:17:06.053842 4631 scope.go:117] "RemoveContainer" containerID="014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b" Nov 29 05:17:06 crc kubenswrapper[4631]: E1129 05:17:06.054148 4631 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b\": container with ID starting with 014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b not found: ID does not exist" containerID="014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b" Nov 29 05:17:06 crc kubenswrapper[4631]: I1129 05:17:06.054180 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b"} err="failed to get container status \"014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b\": rpc error: code = NotFound desc = could not find container \"014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b\": container with ID starting with 014b7abcd47c88cc7ba5e99e5d073bccdda128b2f2264eaffff5b0ad2b14378b not found: ID does not exist" Nov 29 05:17:06 crc kubenswrapper[4631]: I1129 05:17:06.216792 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:17:06 crc kubenswrapper[4631]: E1129 05:17:06.217180 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:17:07 crc kubenswrapper[4631]: I1129 05:17:07.228182 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" path="/var/lib/kubelet/pods/1284f723-5943-4ff5-8ba8-f4f8e479ac19/volumes" Nov 29 05:17:18 crc kubenswrapper[4631]: I1129 05:17:18.431362 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/util/0.log" Nov 29 05:17:18 crc kubenswrapper[4631]: I1129 05:17:18.608246 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/util/0.log" Nov 29 05:17:18 crc kubenswrapper[4631]: I1129 05:17:18.674901 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/pull/0.log" Nov 29 05:17:18 crc kubenswrapper[4631]: I1129 05:17:18.675030 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/pull/0.log" Nov 29 05:17:18 crc kubenswrapper[4631]: I1129 05:17:18.840827 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/util/0.log" Nov 29 05:17:18 crc kubenswrapper[4631]: I1129 05:17:18.910898 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/extract/0.log" Nov 29 05:17:18 crc kubenswrapper[4631]: I1129 05:17:18.984036 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fscxf9_6d4ed5ca-bb17-4681-9640-03977d0545eb/pull/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.045961 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/util/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.222466 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/util/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.253388 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/pull/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.266678 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/pull/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.489236 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/util/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.490400 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/pull/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.521209 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83rz5hh_42828980-6a57-467a-85e4-690f84a7f368/extract/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.696391 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-utilities/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.928502 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-content/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.931770 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-utilities/0.log" Nov 29 05:17:19 crc kubenswrapper[4631]: I1129 05:17:19.975061 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-content/0.log" Nov 29 05:17:20 crc kubenswrapper[4631]: I1129 05:17:20.216472 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:17:20 crc kubenswrapper[4631]: E1129 05:17:20.216666 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:17:20 crc kubenswrapper[4631]: I1129 05:17:20.579052 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-utilities/0.log" Nov 29 05:17:20 crc kubenswrapper[4631]: I1129 05:17:20.618952 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/extract-content/0.log" Nov 29 05:17:20 crc kubenswrapper[4631]: I1129 05:17:20.881893 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-utilities/0.log" Nov 29 05:17:21 crc kubenswrapper[4631]: I1129 05:17:21.317149 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-p5b4b_1e28fdaa-deb3-42c8-8b84-0feac17ca652/registry-server/0.log" Nov 29 05:17:21 crc kubenswrapper[4631]: I1129 05:17:21.518394 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-utilities/0.log" Nov 29 05:17:21 crc kubenswrapper[4631]: I1129 05:17:21.599354 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-content/0.log" Nov 29 05:17:21 crc kubenswrapper[4631]: I1129 05:17:21.668259 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-content/0.log" Nov 29 05:17:21 crc kubenswrapper[4631]: I1129 05:17:21.765853 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-content/0.log" Nov 29 05:17:21 crc kubenswrapper[4631]: I1129 05:17:21.816308 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/extract-utilities/0.log" Nov 29 05:17:22 crc kubenswrapper[4631]: I1129 05:17:22.154786 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-94k48_15665c92-a9d7-4bf9-807a-9f80ce56d8ac/marketplace-operator/0.log" Nov 29 05:17:22 crc kubenswrapper[4631]: I1129 05:17:22.158724 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-sqckp_6fad24c1-8766-4462-a5fb-997b472d2952/registry-server/0.log" Nov 29 05:17:22 crc kubenswrapper[4631]: I1129 05:17:22.314558 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-utilities/0.log" Nov 29 05:17:22 crc kubenswrapper[4631]: I1129 05:17:22.500187 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-utilities/0.log" Nov 29 05:17:22 crc kubenswrapper[4631]: I1129 05:17:22.725065 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-content/0.log" Nov 29 05:17:22 crc kubenswrapper[4631]: I1129 05:17:22.763615 4631 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-content/0.log" Nov 29 05:17:22 crc kubenswrapper[4631]: I1129 05:17:22.984704 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-utilities/0.log" Nov 29 05:17:23 crc kubenswrapper[4631]: I1129 05:17:23.170808 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/extract-content/0.log" Nov 29 05:17:23 crc kubenswrapper[4631]: I1129 05:17:23.195868 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mzpdn_8e16e7df-c41e-47e9-a5c5-2e6af04decf7/registry-server/0.log" Nov 29 05:17:23 crc kubenswrapper[4631]: I1129 05:17:23.208298 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-utilities/0.log" Nov 29 05:17:23 crc kubenswrapper[4631]: I1129 05:17:23.297131 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-utilities/0.log" Nov 29 05:17:23 crc kubenswrapper[4631]: I1129 05:17:23.402434 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-content/0.log" Nov 29 05:17:23 crc kubenswrapper[4631]: I1129 05:17:23.476182 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-content/0.log" Nov 29 05:17:23 crc kubenswrapper[4631]: I1129 05:17:23.588380 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-utilities/0.log" Nov 29 05:17:23 crc kubenswrapper[4631]: I1129 05:17:23.643983 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/extract-content/0.log" Nov 29 05:17:24 crc kubenswrapper[4631]: I1129 05:17:24.089298 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vrqjc_1a5eb073-d0f6-4980-b381-dfa3d6cab81e/registry-server/0.log" Nov 29 05:17:35 crc kubenswrapper[4631]: I1129 05:17:35.217131 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:17:35 crc kubenswrapper[4631]: E1129 05:17:35.217855 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:17:47 crc kubenswrapper[4631]: I1129 05:17:47.222911 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:17:47 crc kubenswrapper[4631]: E1129 05:17:47.223715 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:17:59 crc kubenswrapper[4631]: I1129 05:17:59.216808 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:17:59 crc kubenswrapper[4631]: E1129 05:17:59.217674 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:18:10 crc kubenswrapper[4631]: I1129 05:18:10.217463 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:18:10 crc kubenswrapper[4631]: E1129 05:18:10.220982 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:18:24 crc kubenswrapper[4631]: I1129 05:18:24.217804 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:18:24 crc kubenswrapper[4631]: E1129 05:18:24.218599 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:18:36 crc kubenswrapper[4631]: I1129 05:18:36.217228 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:18:36 crc kubenswrapper[4631]: E1129 05:18:36.217876 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:18:50 crc kubenswrapper[4631]: I1129 05:18:50.216914 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:18:50 crc kubenswrapper[4631]: E1129 05:18:50.217731 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:19:02 crc kubenswrapper[4631]: I1129 05:19:02.218885 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:19:02 crc kubenswrapper[4631]: E1129 05:19:02.219992 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:19:14 crc kubenswrapper[4631]: I1129 05:19:14.450661 4631 generic.go:334] "Generic (PLEG): container finished" podID="b21fae68-736e-42d5-86e8-c7288e5dc464" containerID="924c2209320d75b5494126faa60f48435b46207623bbeefaf505ec8b8f1fc855" exitCode=0 Nov 29 05:19:14 crc kubenswrapper[4631]: I1129 05:19:14.450870 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" event={"ID":"b21fae68-736e-42d5-86e8-c7288e5dc464","Type":"ContainerDied","Data":"924c2209320d75b5494126faa60f48435b46207623bbeefaf505ec8b8f1fc855"} Nov 29 05:19:14 crc kubenswrapper[4631]: I1129 05:19:14.452796 4631 scope.go:117] "RemoveContainer" containerID="924c2209320d75b5494126faa60f48435b46207623bbeefaf505ec8b8f1fc855" Nov 29 05:19:15 crc kubenswrapper[4631]: I1129 05:19:15.037081 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qgrhs_must-gather-l7sbn_b21fae68-736e-42d5-86e8-c7288e5dc464/gather/0.log" Nov 29 05:19:15 crc kubenswrapper[4631]: I1129 05:19:15.217547 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:19:15 crc kubenswrapper[4631]: E1129 05:19:15.218313 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:19:25 crc kubenswrapper[4631]: I1129 05:19:25.369454 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qgrhs/must-gather-l7sbn"] Nov 29 05:19:25 crc kubenswrapper[4631]: I1129 05:19:25.370298 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" podUID="b21fae68-736e-42d5-86e8-c7288e5dc464" containerName="copy" containerID="cri-o://8cf94ce82c7b6ab79c2070f54be0199a7c750d2070a22df9ad4ad3a8bc46a64b" gracePeriod=2 Nov 29 05:19:25 crc kubenswrapper[4631]: I1129 05:19:25.381321 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qgrhs/must-gather-l7sbn"] Nov 29 05:19:25 crc kubenswrapper[4631]: I1129 05:19:25.620944 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qgrhs_must-gather-l7sbn_b21fae68-736e-42d5-86e8-c7288e5dc464/copy/0.log" Nov 29 05:19:25 crc kubenswrapper[4631]: I1129 05:19:25.621726 4631 generic.go:334] "Generic (PLEG): container finished" podID="b21fae68-736e-42d5-86e8-c7288e5dc464" 
containerID="8cf94ce82c7b6ab79c2070f54be0199a7c750d2070a22df9ad4ad3a8bc46a64b" exitCode=143 Nov 29 05:19:25 crc kubenswrapper[4631]: I1129 05:19:25.993049 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qgrhs_must-gather-l7sbn_b21fae68-736e-42d5-86e8-c7288e5dc464/copy/0.log" Nov 29 05:19:25 crc kubenswrapper[4631]: I1129 05:19:25.993553 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:19:25 crc kubenswrapper[4631]: I1129 05:19:25.995255 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b21fae68-736e-42d5-86e8-c7288e5dc464-must-gather-output\") pod \"b21fae68-736e-42d5-86e8-c7288e5dc464\" (UID: \"b21fae68-736e-42d5-86e8-c7288e5dc464\") " Nov 29 05:19:25 crc kubenswrapper[4631]: I1129 05:19:25.995426 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2qrm\" (UniqueName: \"kubernetes.io/projected/b21fae68-736e-42d5-86e8-c7288e5dc464-kube-api-access-h2qrm\") pod \"b21fae68-736e-42d5-86e8-c7288e5dc464\" (UID: \"b21fae68-736e-42d5-86e8-c7288e5dc464\") " Nov 29 05:19:26 crc kubenswrapper[4631]: I1129 05:19:26.022598 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b21fae68-736e-42d5-86e8-c7288e5dc464-kube-api-access-h2qrm" (OuterVolumeSpecName: "kube-api-access-h2qrm") pod "b21fae68-736e-42d5-86e8-c7288e5dc464" (UID: "b21fae68-736e-42d5-86e8-c7288e5dc464"). InnerVolumeSpecName "kube-api-access-h2qrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:19:26 crc kubenswrapper[4631]: I1129 05:19:26.096717 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2qrm\" (UniqueName: \"kubernetes.io/projected/b21fae68-736e-42d5-86e8-c7288e5dc464-kube-api-access-h2qrm\") on node \"crc\" DevicePath \"\"" Nov 29 05:19:26 crc kubenswrapper[4631]: I1129 05:19:26.137846 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b21fae68-736e-42d5-86e8-c7288e5dc464-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "b21fae68-736e-42d5-86e8-c7288e5dc464" (UID: "b21fae68-736e-42d5-86e8-c7288e5dc464"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:19:26 crc kubenswrapper[4631]: I1129 05:19:26.198899 4631 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b21fae68-736e-42d5-86e8-c7288e5dc464-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 29 05:19:26 crc kubenswrapper[4631]: I1129 05:19:26.637119 4631 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qgrhs_must-gather-l7sbn_b21fae68-736e-42d5-86e8-c7288e5dc464/copy/0.log" Nov 29 05:19:26 crc kubenswrapper[4631]: I1129 05:19:26.637836 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qgrhs/must-gather-l7sbn" Nov 29 05:19:26 crc kubenswrapper[4631]: I1129 05:19:26.637837 4631 scope.go:117] "RemoveContainer" containerID="8cf94ce82c7b6ab79c2070f54be0199a7c750d2070a22df9ad4ad3a8bc46a64b" Nov 29 05:19:26 crc kubenswrapper[4631]: I1129 05:19:26.669951 4631 scope.go:117] "RemoveContainer" containerID="924c2209320d75b5494126faa60f48435b46207623bbeefaf505ec8b8f1fc855" Nov 29 05:19:27 crc kubenswrapper[4631]: I1129 05:19:27.222226 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:19:27 crc kubenswrapper[4631]: E1129 05:19:27.223165 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:19:27 crc kubenswrapper[4631]: I1129 05:19:27.231056 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b21fae68-736e-42d5-86e8-c7288e5dc464" path="/var/lib/kubelet/pods/b21fae68-736e-42d5-86e8-c7288e5dc464/volumes" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.490177 4631 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jprb8"] Nov 29 05:19:34 crc kubenswrapper[4631]: E1129 05:19:34.490993 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b21fae68-736e-42d5-86e8-c7288e5dc464" containerName="copy" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.491004 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b21fae68-736e-42d5-86e8-c7288e5dc464" containerName="copy" Nov 29 05:19:34 crc kubenswrapper[4631]: E1129 05:19:34.491022 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerName="registry-server" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.491028 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerName="registry-server" Nov 29 05:19:34 crc kubenswrapper[4631]: E1129 05:19:34.491047 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b21fae68-736e-42d5-86e8-c7288e5dc464" containerName="gather" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.491055 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="b21fae68-736e-42d5-86e8-c7288e5dc464" containerName="gather" Nov 29 05:19:34 crc kubenswrapper[4631]: E1129 05:19:34.491075 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerName="extract-utilities" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.491080 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerName="extract-utilities" Nov 29 05:19:34 crc kubenswrapper[4631]: E1129 05:19:34.491093 4631 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerName="extract-content" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.491100 4631 state_mem.go:107] "Deleted CPUSet assignment" podUID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerName="extract-content" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.491282 4631 
memory_manager.go:354] "RemoveStaleState removing state" podUID="1284f723-5943-4ff5-8ba8-f4f8e479ac19" containerName="registry-server" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.491307 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b21fae68-736e-42d5-86e8-c7288e5dc464" containerName="gather" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.491318 4631 memory_manager.go:354] "RemoveStaleState removing state" podUID="b21fae68-736e-42d5-86e8-c7288e5dc464" containerName="copy" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.492598 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.511084 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jprb8"] Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.569713 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-utilities\") pod \"redhat-marketplace-jprb8\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.569769 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqdhr\" (UniqueName: \"kubernetes.io/projected/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-kube-api-access-wqdhr\") pod \"redhat-marketplace-jprb8\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.569832 4631 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-catalog-content\") pod \"redhat-marketplace-jprb8\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.671040 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-catalog-content\") pod \"redhat-marketplace-jprb8\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.671175 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-utilities\") pod \"redhat-marketplace-jprb8\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.671209 4631 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqdhr\" (UniqueName: \"kubernetes.io/projected/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-kube-api-access-wqdhr\") pod \"redhat-marketplace-jprb8\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.671756 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-catalog-content\") pod \"redhat-marketplace-jprb8\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.672044 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-utilities\") pod \"redhat-marketplace-jprb8\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.693512 4631 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqdhr\" (UniqueName: \"kubernetes.io/projected/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-kube-api-access-wqdhr\") pod \"redhat-marketplace-jprb8\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:34 crc kubenswrapper[4631]: I1129 05:19:34.815814 4631 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:35 crc kubenswrapper[4631]: I1129 05:19:35.138829 4631 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jprb8"] Nov 29 05:19:35 crc kubenswrapper[4631]: I1129 05:19:35.733741 4631 generic.go:334] "Generic (PLEG): container finished" podID="9de39d42-b2e2-42a2-a2ff-fe0e695f0be9" containerID="4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f" exitCode=0 Nov 29 05:19:35 crc kubenswrapper[4631]: I1129 05:19:35.733840 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jprb8" event={"ID":"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9","Type":"ContainerDied","Data":"4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f"} Nov 29 05:19:35 crc kubenswrapper[4631]: I1129 05:19:35.734091 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jprb8" event={"ID":"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9","Type":"ContainerStarted","Data":"684e17b325c9cb20d79e38c14c30ac948f168484e88fc41e3cab3894457e32ea"} Nov 29 05:19:35 crc kubenswrapper[4631]: I1129 05:19:35.736088 4631 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 29 05:19:37 crc kubenswrapper[4631]: I1129 05:19:37.757148 4631 generic.go:334] "Generic (PLEG): container finished" podID="9de39d42-b2e2-42a2-a2ff-fe0e695f0be9" containerID="2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1" exitCode=0 Nov 29 05:19:37 crc kubenswrapper[4631]: I1129 05:19:37.757213 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jprb8" event={"ID":"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9","Type":"ContainerDied","Data":"2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1"} Nov 29 05:19:38 crc kubenswrapper[4631]: I1129 05:19:38.772662 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jprb8" event={"ID":"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9","Type":"ContainerStarted","Data":"08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66"} Nov 29 05:19:38 crc kubenswrapper[4631]: I1129 05:19:38.798012 4631 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jprb8" podStartSLOduration=2.018351115 
podStartE2EDuration="4.797994067s" podCreationTimestamp="2025-11-29 05:19:34 +0000 UTC" firstStartedPulling="2025-11-29 05:19:35.735871509 +0000 UTC m=+4102.800375023" lastFinishedPulling="2025-11-29 05:19:38.515514451 +0000 UTC m=+4105.580017975" observedRunningTime="2025-11-29 05:19:38.788672998 +0000 UTC m=+4105.853176522" watchObservedRunningTime="2025-11-29 05:19:38.797994067 +0000 UTC m=+4105.862497591" Nov 29 05:19:42 crc kubenswrapper[4631]: I1129 05:19:42.217168 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:19:42 crc kubenswrapper[4631]: E1129 05:19:42.217840 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:19:44 crc kubenswrapper[4631]: I1129 05:19:44.816768 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:44 crc kubenswrapper[4631]: I1129 05:19:44.817058 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:44 crc kubenswrapper[4631]: I1129 05:19:44.892013 4631 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:44 crc kubenswrapper[4631]: I1129 05:19:44.972074 4631 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:46 crc kubenswrapper[4631]: I1129 05:19:46.058909 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jprb8"] Nov 29 05:19:46 crc kubenswrapper[4631]: I1129 05:19:46.871909 4631 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jprb8" podUID="9de39d42-b2e2-42a2-a2ff-fe0e695f0be9" containerName="registry-server" containerID="cri-o://08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66" gracePeriod=2 Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.342914 4631 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.404640 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-utilities\") pod \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.404831 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-catalog-content\") pod \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.404968 4631 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wqdhr\" (UniqueName: \"kubernetes.io/projected/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-kube-api-access-wqdhr\") pod \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\" (UID: \"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9\") " Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.405569 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-utilities" (OuterVolumeSpecName: "utilities") pod "9de39d42-b2e2-42a2-a2ff-fe0e695f0be9" (UID: "9de39d42-b2e2-42a2-a2ff-fe0e695f0be9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.406242 4631 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-utilities\") on node \"crc\" DevicePath \"\"" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.411315 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-kube-api-access-wqdhr" (OuterVolumeSpecName: "kube-api-access-wqdhr") pod "9de39d42-b2e2-42a2-a2ff-fe0e695f0be9" (UID: "9de39d42-b2e2-42a2-a2ff-fe0e695f0be9"). InnerVolumeSpecName "kube-api-access-wqdhr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.425365 4631 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9de39d42-b2e2-42a2-a2ff-fe0e695f0be9" (UID: "9de39d42-b2e2-42a2-a2ff-fe0e695f0be9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.508098 4631 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.508135 4631 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wqdhr\" (UniqueName: \"kubernetes.io/projected/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9-kube-api-access-wqdhr\") on node \"crc\" DevicePath \"\"" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.887081 4631 generic.go:334] "Generic (PLEG): container finished" podID="9de39d42-b2e2-42a2-a2ff-fe0e695f0be9" containerID="08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66" exitCode=0 Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.887149 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jprb8" event={"ID":"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9","Type":"ContainerDied","Data":"08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66"} Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.887174 4631 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jprb8" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.887194 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jprb8" event={"ID":"9de39d42-b2e2-42a2-a2ff-fe0e695f0be9","Type":"ContainerDied","Data":"684e17b325c9cb20d79e38c14c30ac948f168484e88fc41e3cab3894457e32ea"} Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.887224 4631 scope.go:117] "RemoveContainer" containerID="08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.939468 4631 scope.go:117] "RemoveContainer" containerID="2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1" Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.944933 4631 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jprb8"] Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.965150 4631 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jprb8"] Nov 29 05:19:47 crc kubenswrapper[4631]: I1129 05:19:47.991092 4631 scope.go:117] "RemoveContainer" containerID="4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f" Nov 29 05:19:48 crc kubenswrapper[4631]: I1129 05:19:48.036998 4631 scope.go:117] "RemoveContainer" containerID="08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66" Nov 29 05:19:48 crc kubenswrapper[4631]: E1129 05:19:48.037607 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66\": container with ID starting with 08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66 not found: ID does not exist" containerID="08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66" Nov 29 05:19:48 crc kubenswrapper[4631]: I1129 05:19:48.037664 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66"} err="failed to get container status 
\"08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66\": rpc error: code = NotFound desc = could not find container \"08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66\": container with ID starting with 08a8212a1b4dbee4fa706081d3fc50b008ff801a440adf5f4cedb05344839b66 not found: ID does not exist" Nov 29 05:19:48 crc kubenswrapper[4631]: I1129 05:19:48.037722 4631 scope.go:117] "RemoveContainer" containerID="2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1" Nov 29 05:19:48 crc kubenswrapper[4631]: E1129 05:19:48.038609 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1\": container with ID starting with 2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1 not found: ID does not exist" containerID="2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1" Nov 29 05:19:48 crc kubenswrapper[4631]: I1129 05:19:48.038657 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1"} err="failed to get container status \"2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1\": rpc error: code = NotFound desc = could not find container \"2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1\": container with ID starting with 2125df367c368b5a17ababb96ba68b82147218d0cd37575efdc93aa16a431fd1 not found: ID does not exist" Nov 29 05:19:48 crc kubenswrapper[4631]: I1129 05:19:48.038689 4631 scope.go:117] "RemoveContainer" containerID="4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f" Nov 29 05:19:48 crc kubenswrapper[4631]: E1129 05:19:48.039094 4631 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f\": container with ID starting with 4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f not found: ID does not exist" containerID="4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f" Nov 29 05:19:48 crc kubenswrapper[4631]: I1129 05:19:48.039137 4631 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f"} err="failed to get container status \"4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f\": rpc error: code = NotFound desc = could not find container \"4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f\": container with ID starting with 4979fde583f3715dc60927c4f95dfb50838b62f71053d93ef4b429eea421aa9f not found: ID does not exist" Nov 29 05:19:49 crc kubenswrapper[4631]: I1129 05:19:49.231249 4631 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9de39d42-b2e2-42a2-a2ff-fe0e695f0be9" path="/var/lib/kubelet/pods/9de39d42-b2e2-42a2-a2ff-fe0e695f0be9/volumes" Nov 29 05:19:56 crc kubenswrapper[4631]: I1129 05:19:56.217811 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:19:56 crc kubenswrapper[4631]: E1129 05:19:56.218752 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:20:08 crc kubenswrapper[4631]: I1129 05:20:08.494170 4631 scope.go:117] "RemoveContainer" containerID="fea6c20cdd7fbba09b5f9822955f9cb9a5f7e46575a458f4218c377b883c18db" Nov 29 05:20:10 crc kubenswrapper[4631]: I1129 05:20:10.216897 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:20:10 crc kubenswrapper[4631]: E1129 05:20:10.217436 4631 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bmtd_openshift-machine-config-operator(cddaf389-3216-4be7-a91d-8bed4a7bb9e9)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" podUID="cddaf389-3216-4be7-a91d-8bed4a7bb9e9" Nov 29 05:20:24 crc kubenswrapper[4631]: I1129 05:20:24.217142 4631 scope.go:117] "RemoveContainer" containerID="5209a526b395a038e248ff422a6ad81f02e8ba91f5e2cd8f0f63c5c346b594f8" Nov 29 05:20:25 crc kubenswrapper[4631]: I1129 05:20:25.325519 4631 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bmtd" event={"ID":"cddaf389-3216-4be7-a91d-8bed4a7bb9e9","Type":"ContainerStarted","Data":"3486c4090fb32a66da1149fdd995242506c098961a4f51a9cd3906b072dfa315"}